Dataset schema (field | type):

| field | type |
| --- | --- |
| effect | stringclasses (48 values) |
| original_source_type | stringlengths (0–23k) |
| opens_and_abbrevs | listlengths (2–92) |
| isa_cross_project_example | bool (1 class) |
| source_definition | stringlengths (9–57.9k) |
| partial_definition | stringlengths (7–23.3k) |
| is_div | bool (2 classes) |
| is_type | null |
| is_proof | bool (2 classes) |
| completed_definiton | stringlengths (1–250k) |
| dependencies | dict |
| effect_flags | sequencelengths (0–2) |
| ideal_premises | sequencelengths (0–236) |
| mutual_with | sequencelengths (0–11) |
| file_context | stringlengths (0–407k) |
| interleaved | bool (1 class) |
| is_simply_typed | bool (2 classes) |
| file_name | stringlengths (5–48) |
| vconfig | dict |
| is_simple_lemma | null |
| source_type | stringlengths (10–23k) |
| proof_features | sequencelengths (0–1) |
| name | stringlengths (8–95) |
| source | dict |
| verbose_type | stringlengths (1–7.42k) |
| source_range | dict |
Row 1:

effect: Prims.Tot

original_source_type:
val va_quick_Xor64 (dst: va_operand_dst_opr64) (src: va_operand_opr64)
: (va_quickCode unit (va_code_Xor64 dst src))

opens_and_abbrevs: [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
]

isa_cross_project_example: false

source_definition:
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Xor64 dst src)) =
(va_QProc (va_code_Xor64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))

partial_definition:
val va_quick_Xor64 (dst: va_operand_dst_opr64) (src: va_operand_opr64)
: (va_quickCode unit (va_code_Xor64 dst src))
let va_quick_Xor64 (dst: va_operand_dst_opr64) (src: va_operand_opr64)
: (va_quickCode unit (va_code_Xor64 dst src)) =

is_div: false

is_type: null

is_proof: false

completed_definiton:
(va_QProc (va_code_Xor64 dst src)
([va_Mod_flags; va_mod_dst_opr64 dst])
(va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))

dependencies: {
"checked_file": "Vale.X64.InsBasic.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.InsBasic.fsti"
}

effect_flags: [
"total"
]

ideal_premises: [
"Vale.X64.Decls.va_operand_dst_opr64",
"Vale.X64.Decls.va_operand_opr64",
"Vale.X64.QuickCode.va_QProc",
"Prims.unit",
"Vale.X64.InsBasic.va_code_Xor64",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_mod_dst_opr64",
"Prims.Nil",
"Vale.X64.InsBasic.va_wp_Xor64",
"Vale.X64.InsBasic.va_wpProof_Xor64",
"Vale.X64.QuickCode.va_quickCode"
]

mutual_with: []

file_context:
module Vale.X64.InsBasic
open FStar.Mul
open Vale.Def.Types_s
open Vale.Arch.HeapImpl
open Vale.Arch.Types
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.QuickCode
unfold let vale_heap = Vale.X64.Memory.vale_heap
unfold let vale_stack = Vale.X64.Stack_i.vale_stack
open Vale.X64.CPU_Features_s
//-- Mov64
val va_code_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mov64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mov64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src /\ va_state_eq va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) . let va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in
va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mov64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mov64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Mov64 dst src)) =
(va_QProc (va_code_Mov64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Mov64 dst src)
(va_wpProof_Mov64 dst src))
//--
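(* Every instruction X in this interface follows the same six-declaration
   pattern, visible above for Mov64:
     va_code_X            - constructs the code AST for X
     va_codegen_success_X - va_pbool indicating whether X can be code-generated
     va_lemma_X           - Hoare-style correctness lemma (requires/ensures)
     va_wp_X              - weakest-precondition predicate for X
     va_wpProof_X         - proof relating va_wp_X back to va_lemma_X
     va_quick_X           - bundles the code, its modified-location list, the
                            wp, and the wp proof into a va_quickCode via va_QProc
   The list passed to va_QProc (here [va_mod_dst_opr64 dst]) declares which
   locations the instruction may modify. *)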
//-- Cmovc64
val va_code_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Cmovc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cmovc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
Vale.X64.Decls.cf (va_get_flags va_sM) then (va_eval_dst_opr64 va_sM dst = va_eval_opr64 va_s0
src) else (va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst)) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) . let
va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_if
(Vale.X64.Decls.cf (va_get_flags va_sM)) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_opr64
va_s0 src) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst) ==> va_k va_sM
(())))
val va_wpProof_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cmovc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cmovc64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Cmovc64 dst src)) =
(va_QProc (va_code_Cmovc64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Cmovc64 dst src)
(va_wpProof_Cmovc64 dst src))
//--
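(* Cmovc64 (above) is a conditional move: dst receives src exactly when the
   carry flag is set, so the carry flag must be valid (valid_cf) on entry.
   Its modifies list is just [va_mod_dst_opr64 dst], so the flags themselves
   are preserved. *)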
//-- Add64
val va_code_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst + va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64 dst src)) =
(va_QProc (va_code_Add64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64 dst src)
(va_wpProof_Add64 dst src))
//--
//-- Add64Wrap
val va_code_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src >= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src >= pow2_64) ==> va_k va_sM (())))
val va_wpProof_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64Wrap dst src)) =
(va_QProc (va_code_Add64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64Wrap dst
src) (va_wpProof_Add64Wrap dst src))
//--
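(* Add64 vs Add64Wrap (above): Add64 carries the precondition
   dst + src < pow2_64, so its postcondition speaks in exact integer
   arithmetic; Add64Wrap drops that precondition, computes the wrapped sum
   add_wrap64 dst src (i.e. (dst + src) % pow2_64), and records the overflow
   in the carry flag via updated_cf. For example, with dst = pow2_64 - 1 and
   src = 1, Add64's precondition fails, while Add64Wrap yields 0 with CF set. *)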
//-- AddLea64
val va_code_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 -> src2:va_operand_opr64
-> Tot va_code
val va_codegen_success_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> Tot va_pbool
val va_lemma_AddLea64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src1:va_operand_opr64 -> src2:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AddLea64 dst src1 src2) va_s0 /\ va_is_dst_dst_opr64
dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 <
pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 /\
va_state_eq va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\
va_get_ok va_s0 /\ Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 +
va_eval_opr64 va_s0 src2 < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 ==> va_k va_sM (())))
val va_wpProof_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AddLea64 dst src1 src2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AddLea64 dst src1 src2)
([va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64) :
(va_quickCode unit (va_code_AddLea64 dst src1 src2)) =
(va_QProc (va_code_AddLea64 dst src1 src2) ([va_mod_dst_opr64 dst]) (va_wp_AddLea64 dst src1
src2) (va_wpProof_AddLea64 dst src1 src2))
//--
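(* AddLea64 (above) is a three-operand add, dst := src1 + src2, as its name
   suggests realized via LEA. At most one of src1/src2 may be a memory operand
   (max_one_mem) and the exact sum must fit: src1 + src2 < pow2_64. Unlike
   Add64, its modifies list is only [va_mod_dst_opr64 dst], so the flags are
   left untouched. *)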
//-- Adc64
val va_code_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM =
va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >= pow2_64) ==> va_k va_sM
(())))
val va_wpProof_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64 dst src)) =
(va_QProc (va_code_Adc64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64 dst src)
(va_wpProof_Adc64 dst src))
//--
//-- Adc64Wrap
val va_code_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) ==> va_k va_sM (())))
val va_wpProof_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64Wrap dst src)) =
(va_QProc (va_code_Adc64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64Wrap dst
src) (va_wpProof_Adc64Wrap dst src))
//--
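(* Adc64 / Adc64Wrap (above) add src plus the incoming carry flag, which is
   what makes multi-limb ("bignum") addition compose: add the low limbs with
   Add64Wrap, then propagate the carry into the high limbs with Adc64Wrap.
   Illustrative sketch for a 128-bit add of rRdx:rRax and rRcx:rRbx (register
   choices are for illustration only, not from this file):

     Add64Wrap rRax rRbx   (low limbs; CF := low sum overflowed)
     Adc64Wrap rRdx rRcx   (high limbs plus CF)

   Adc64Wrap's updated_cf postcondition then reports whether the full 128-bit
   sum itself overflowed. *)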
//-- Adcx64Wrap
val va_code_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adcx64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adcx64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_cf
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adcx64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adcx64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adcx64Wrap dst src)) =
(va_QProc (va_code_Adcx64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adcx64Wrap
dst src) (va_wpProof_Adcx64Wrap dst src))
//--
//-- Adox64Wrap
val va_code_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adox64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adox64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_of
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_of (va_get_flags va_sM)
(va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags
va_sM) (va_get_flags va_s0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_of (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_of (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adox64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adox64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adox64Wrap dst src)) =
(va_QProc (va_code_Adox64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adox64Wrap
dst src) (va_wpProof_Adox64Wrap dst src))
//--
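(* Adcx64Wrap and Adox64Wrap (above) are the ADX pair: both compute the same
   add-with-carry, but Adcx64Wrap reads and writes only CF while preserving OF
   (maintained_of), and Adox64Wrap reads and writes only OF while preserving
   CF (maintained_cf). Both require adx_enabled. Keeping the two flags
   independent lets two carry chains be interleaved in the same instruction
   stream, e.g. in multiply-accumulate loops of bignum multiplication. *)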
//-- Sub64
val va_code_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <= va_eval_dst_opr64 va_s0 dst -
va_eval_opr64 va_s0 src))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <=
va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst - va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64 dst src)) =
(va_QProc (va_code_Sub64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64 dst src)
(va_wpProof_Sub64 dst src))
//--
//-- Sub64Wrap
val va_code_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - va_eval_opr64 va_s0 src < 0) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - va_eval_opr64
va_s0 src < 0) ==> va_k va_sM (())))
val va_wpProof_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64Wrap dst src)) =
(va_QProc (va_code_Sub64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64Wrap dst
src) (va_wpProof_Sub64Wrap dst src))
//--
//-- Sbb64
val va_code_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sbb64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sbb64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0)) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - (va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else
0)) < 0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64
(va_eval_dst_opr64 va_s0 dst) (Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0))) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - (va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) < 0) ==>
va_k va_sM (())))
val va_wpProof_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sbb64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sbb64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sbb64 dst src)) =
(va_QProc (va_code_Sbb64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sbb64 dst src)
(va_wpProof_Sbb64 dst src))
//--
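(* For subtraction the carry flag acts as a borrow: Sub64Wrap's updated_cf
   records dst - src < 0, and Sbb64 subtracts src plus the incoming borrow
   (dst := dst - (src + cf), modulo pow2_64). Multi-limb subtraction thus
   mirrors the Add64Wrap/Adc64Wrap pattern: Sub64Wrap on the low limbs, Sbb64
   on the higher ones. Sub64 itself instead requires 0 <= dst - src and
   computes the exact difference. *)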
//-- Mul64Wrap
val va_code_Mul64Wrap : src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mul64Wrap : src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mul64Wrap : va_b0:va_code -> va_s0:va_state -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mul64Wrap src) va_s0 /\ va_is_src_opr64 src va_s0 /\
va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_get_reg64 rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat
(va_get_reg64 rRax va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_reg64 rRdx
va_sM (va_update_reg64 rRax va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))
[@ va_qattr]
let va_wp_Mul64Wrap (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall (va_x_efl:Vale.X64.Flags.t)
(va_x_rax:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax
va_x_rax (va_upd_flags va_x_efl va_s0)) in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_get_reg64
rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat (va_get_reg64 rRax va_s0) (va_eval_opr64
va_s0 src) ==> va_k va_sM (())))
val va_wpProof_Mul64Wrap : src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mul64Wrap src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax; va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mul64Wrap (src:va_operand_opr64) : (va_quickCode unit (va_code_Mul64Wrap src)) =
(va_QProc (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax; va_Mod_flags])
(va_wp_Mul64Wrap src) (va_wpProof_Mul64Wrap src))
//--
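(* Mul64Wrap (above) models unsigned MUL with the fixed x64 register
   convention: the other factor is taken from rRax and the 128-bit product is
   split across rRdx (high 64 bits) and rRax (low 64 bits), as stated by the
   ensures clause pow2_64 * rdx' + rax' == rax0 * src. The flags appear in the
   modifies list but the ensures clause leaves them unconstrained. *)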
//-- Mulx64
val va_code_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mulx64 : va_b0:va_code -> va_s0:va_state -> dst_hi:va_operand_dst_opr64 ->
dst_lo:va_operand_dst_opr64 -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mulx64 dst_hi dst_lo src) va_s0 /\ va_is_dst_dst_opr64
dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok
va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64 va_sM dst_lo ==
va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst_lo va_sM (va_update_operand_dst_opr64
dst_hi va_sM va_s0)))))
[@ va_qattr]
let va_wp_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64) (src:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src
va_s0 /\ va_get_ok va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo /\ (forall
(va_x_dst_hi:va_value_dst_opr64) (va_x_dst_lo:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst_lo va_x_dst_lo (va_upd_operand_dst_opr64 dst_hi va_x_dst_hi va_s0)
in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64
va_sM dst_lo == va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) ==> va_k va_sM
(())))
val va_wpProof_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mulx64 dst_hi dst_lo src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mulx64 dst_hi dst_lo src)
([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64)
(src:va_operand_opr64) : (va_quickCode unit (va_code_Mulx64 dst_hi dst_lo src)) =
(va_QProc (va_code_Mulx64 dst_hi dst_lo src) ([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi])
(va_wp_Mulx64 dst_hi dst_lo src) (va_wpProof_Mulx64 dst_hi dst_lo src))
//--
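(* Mulx64 (above) models BMI2 MULX: the implicit factor comes from rRdx, the
   two explicit destinations receive the high and low halves of the product
   and must be distinct (dst_hi =!= dst_lo), and bmi2_enabled is required.
   Unlike Mul64Wrap, the modifies list contains no va_Mod_flags: MULX leaves
   the flags intact, which is exactly what the Adcx/Adox carry chains above
   depend on. *)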
//-- IMul64
val va_code_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_IMul64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_IMul64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0
src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) < pow2_64 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k va_sM (())))
val va_wpProof_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_IMul64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_IMul64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_IMul64 dst src)) =
(va_QProc (va_code_IMul64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_IMul64 dst src)
(va_wpProof_IMul64 dst src))
//--
//-- Xor64
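(* Xor64 computes the bitwise xor of dst and src (ixor64) and, per the
   ensures clause below, leaves OF and CF cleared and valid afterwards. This
   is why xor r, r is the idiomatic way both to zero a register and to put
   the carry/overflow flags into a known state before starting a carry chain. *)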
val va_code_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Xor64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xor64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ ~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\
~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\ Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\
Vale.X64.Decls.valid_of (va_get_flags va_sM) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\ ~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\
Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\ Vale.X64.Decls.valid_of (va_get_flags va_sM)
==> va_k va_sM (())))
val va_wpProof_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xor64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xor64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit

interleaved: false

is_simply_typed: false

file_name: Vale.X64.InsBasic.fsti

vconfig: {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
}

is_simple_lemma: null

source_type:
val va_quick_Xor64 (dst: va_operand_dst_opr64) (src: va_operand_opr64)
: (va_quickCode unit (va_code_Xor64 dst src))

proof_features: []

name: Vale.X64.InsBasic.va_quick_Xor64

source: {
"file_name": "obj/Vale.X64.InsBasic.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
}

verbose_type:
dst: Vale.X64.Decls.va_operand_dst_opr64 -> src: Vale.X64.Decls.va_operand_opr64
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.X64.InsBasic.va_code_Xor64 dst src)

source_range: {
"end_col": 31,
"end_line": 647,
"start_col": 2,
"start_line": 646
}
Row 2:

effect: Prims.Tot

original_source_type:
val va_quick_Cpuid_Avx: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Avx ()))

opens_and_abbrevs: [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
]

isa_cross_project_example: false

source_definition:
let va_quick_Cpuid_Avx () : (va_quickCode unit (va_code_Cpuid_Avx ())) =
(va_QProc (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx va_wpProof_Cpuid_Avx)

partial_definition:
val va_quick_Cpuid_Avx: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Avx ()))
let va_quick_Cpuid_Avx () : (va_quickCode unit (va_code_Cpuid_Avx ())) =

is_div: false

is_type: null

is_proof: false

completed_definiton:
(va_QProc (va_code_Cpuid_Avx ())
([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax])
va_wp_Cpuid_Avx
va_wpProof_Cpuid_Avx)

dependencies: {
"checked_file": "Vale.X64.InsBasic.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.InsBasic.fsti"
}

effect_flags: [
"total"
]

ideal_premises: [
"Prims.unit",
"Vale.X64.QuickCode.va_QProc",
"Vale.X64.InsBasic.va_code_Cpuid_Avx",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rRdx",
"Vale.X64.Machine_s.rRcx",
"Vale.X64.Machine_s.rRbx",
"Vale.X64.Machine_s.rRax",
"Prims.Nil",
"Vale.X64.InsBasic.va_wp_Cpuid_Avx",
"Vale.X64.InsBasic.va_wpProof_Cpuid_Avx",
"Vale.X64.QuickCode.va_quickCode"
]

mutual_with: []

file_context:
module Vale.X64.InsBasic
open FStar.Mul
open Vale.Def.Types_s
open Vale.Arch.HeapImpl
open Vale.Arch.Types
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.QuickCode
unfold let vale_heap = Vale.X64.Memory.vale_heap
unfold let vale_stack = Vale.X64.Stack_i.vale_stack
open Vale.X64.CPU_Features_s
//-- Mov64
val va_code_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mov64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mov64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src /\ va_state_eq va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) . let va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in
va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mov64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mov64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Mov64 dst src)) =
(va_QProc (va_code_Mov64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Mov64 dst src)
(va_wpProof_Mov64 dst src))
//--
//-- Cmovc64
val va_code_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Cmovc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cmovc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
Vale.X64.Decls.cf (va_get_flags va_sM) then (va_eval_dst_opr64 va_sM dst = va_eval_opr64 va_s0
src) else (va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst)) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) . let
va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_if
(Vale.X64.Decls.cf (va_get_flags va_sM)) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_opr64
va_s0 src) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst) ==> va_k va_sM
(())))
val va_wpProof_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cmovc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cmovc64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Cmovc64 dst src)) =
(va_QProc (va_code_Cmovc64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Cmovc64 dst src)
(va_wpProof_Cmovc64 dst src))
//--
//-- Add64
val va_code_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst + va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64 dst src)) =
(va_QProc (va_code_Add64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64 dst src)
(va_wpProof_Add64 dst src))
//--
//-- Add64Wrap
val va_code_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src >= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src >= pow2_64) ==> va_k va_sM (())))
val va_wpProof_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64Wrap dst src)) =
(va_QProc (va_code_Add64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64Wrap dst
src) (va_wpProof_Add64Wrap dst src))
//--
//-- AddLea64
val va_code_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 -> src2:va_operand_opr64
-> Tot va_code
val va_codegen_success_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> Tot va_pbool
val va_lemma_AddLea64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src1:va_operand_opr64 -> src2:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AddLea64 dst src1 src2) va_s0 /\ va_is_dst_dst_opr64
dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 <
pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 /\
va_state_eq va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\
va_get_ok va_s0 /\ Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 +
va_eval_opr64 va_s0 src2 < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 ==> va_k va_sM (())))
val va_wpProof_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AddLea64 dst src1 src2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AddLea64 dst src1 src2)
([va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64) :
(va_quickCode unit (va_code_AddLea64 dst src1 src2)) =
(va_QProc (va_code_AddLea64 dst src1 src2) ([va_mod_dst_opr64 dst]) (va_wp_AddLea64 dst src1
src2) (va_wpProof_AddLea64 dst src1 src2))
//--
//-- Adc64
val va_code_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM =
va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >= pow2_64) ==> va_k va_sM
(())))
val va_wpProof_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64 dst src)) =
(va_QProc (va_code_Adc64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64 dst src)
(va_wpProof_Adc64 dst src))
//--
//-- Adc64Wrap
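// Adc64Wrap is the wrapping variant of Adc64: dst := (dst + src + CF) % pow2_64,
// with CF updated to the carry-out; no overflow precondition is needed.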
val va_code_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) ==> va_k va_sM (())))
val va_wpProof_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64Wrap dst src)) =
(va_QProc (va_code_Adc64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64Wrap dst
src) (va_wpProof_Adc64Wrap dst src))
//--
//-- Adcx64Wrap
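// Adcx64Wrap models ADCX (requires adx_enabled): a wrapping add-with-carry that
// updates only CF and preserves OF, which lets two carry chains run interleaved.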
val va_code_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adcx64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adcx64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_cf
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adcx64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adcx64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adcx64Wrap dst src)) =
(va_QProc (va_code_Adcx64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adcx64Wrap
dst src) (va_wpProof_Adcx64Wrap dst src))
//--
//-- Adox64Wrap
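// Adox64Wrap models ADOX (requires adx_enabled): like Adcx64Wrap but using OF as
// the carry bit; CF is preserved across the instruction.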
val va_code_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adox64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adox64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_of
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_of (va_get_flags va_sM)
(va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags
va_sM) (va_get_flags va_s0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_of (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_of (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adox64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adox64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adox64Wrap dst src)) =
(va_QProc (va_code_Adox64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adox64Wrap
dst src) (va_wpProof_Adox64Wrap dst src))
//--
//-- Sub64
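// Sub64 computes dst := dst - src under the precondition that no borrow occurs
// (0 <= dst - src); the flags are modified but left unspecified.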
val va_code_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <= va_eval_dst_opr64 va_s0 dst -
va_eval_opr64 va_s0 src))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <=
va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst - va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64 dst src)) =
(va_QProc (va_code_Sub64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64 dst src)
(va_wpProof_Sub64 dst src))
//--
//-- Sub64Wrap
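// Sub64Wrap computes dst := (dst - src) % pow2_64 with no precondition;
// CF is set exactly when the subtraction borrows (dst - src < 0).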
val va_code_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - va_eval_opr64 va_s0 src < 0) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - va_eval_opr64
va_s0 src < 0) ==> va_k va_sM (())))
val va_wpProof_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64Wrap dst src)) =
(va_QProc (va_code_Sub64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64Wrap dst
src) (va_wpProof_Sub64Wrap dst src))
//--
//-- Sbb64
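// Sbb64 is subtract-with-borrow: dst := dst - (src + CF), wrapping, with CF
// required valid on entry and updated to the borrow-out of the full subtraction.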
val va_code_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sbb64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sbb64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0)) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - (va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else
0)) < 0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64
(va_eval_dst_opr64 va_s0 dst) (Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0))) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - (va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) < 0) ==>
va_k va_sM (())))
val va_wpProof_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sbb64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sbb64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sbb64 dst src)) =
(va_QProc (va_code_Sbb64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sbb64 dst src)
(va_wpProof_Sbb64 dst src))
//--
//-- Mul64Wrap
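// Mul64Wrap models unsigned MUL: the 128-bit product old(rax) * src is returned
// in rdx:rax (pow2_64 * rdx + rax == old(rax) * src); flags are havoced.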
val va_code_Mul64Wrap : src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mul64Wrap : src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mul64Wrap : va_b0:va_code -> va_s0:va_state -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mul64Wrap src) va_s0 /\ va_is_src_opr64 src va_s0 /\
va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_get_reg64 rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat
(va_get_reg64 rRax va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_reg64 rRdx
va_sM (va_update_reg64 rRax va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))
[@ va_qattr]
let va_wp_Mul64Wrap (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall (va_x_efl:Vale.X64.Flags.t)
(va_x_rax:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax
va_x_rax (va_upd_flags va_x_efl va_s0)) in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_get_reg64
rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat (va_get_reg64 rRax va_s0) (va_eval_opr64
va_s0 src) ==> va_k va_sM (())))
val va_wpProof_Mul64Wrap : src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mul64Wrap src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax; va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mul64Wrap (src:va_operand_opr64) : (va_quickCode unit (va_code_Mul64Wrap src)) =
(va_QProc (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax; va_Mod_flags])
(va_wp_Mul64Wrap src) (va_wpProof_Mul64Wrap src))
//--
//-- Mulx64
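// Mulx64 models BMI2 MULX (requires bmi2_enabled and dst_hi distinct from dst_lo):
// dst_hi:dst_lo := rdx * src, leaving all flags unmodified.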
val va_code_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mulx64 : va_b0:va_code -> va_s0:va_state -> dst_hi:va_operand_dst_opr64 ->
dst_lo:va_operand_dst_opr64 -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mulx64 dst_hi dst_lo src) va_s0 /\ va_is_dst_dst_opr64
dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok
va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64 va_sM dst_lo ==
va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst_lo va_sM (va_update_operand_dst_opr64
dst_hi va_sM va_s0)))))
[@ va_qattr]
let va_wp_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64) (src:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src
va_s0 /\ va_get_ok va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo /\ (forall
(va_x_dst_hi:va_value_dst_opr64) (va_x_dst_lo:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst_lo va_x_dst_lo (va_upd_operand_dst_opr64 dst_hi va_x_dst_hi va_s0)
in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64
va_sM dst_lo == va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) ==> va_k va_sM
(())))
val va_wpProof_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mulx64 dst_hi dst_lo src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mulx64 dst_hi dst_lo src)
([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64)
(src:va_operand_opr64) : (va_quickCode unit (va_code_Mulx64 dst_hi dst_lo src)) =
(va_QProc (va_code_Mulx64 dst_hi dst_lo src) ([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi])
(va_wp_Mulx64 dst_hi dst_lo src) (va_wpProof_Mulx64 dst_hi dst_lo src))
//--
//-- IMul64
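// IMul64 computes dst := dst * src, with the precondition that the product
// fits in 64 bits; flags are modified but unspecified.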
val va_code_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_IMul64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_IMul64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0
src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) < pow2_64 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k va_sM (())))
val va_wpProof_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_IMul64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_IMul64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_IMul64 dst src)) =
(va_QProc (va_code_IMul64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_IMul64 dst src)
(va_wpProof_IMul64 dst src))
//--
//-- Xor64
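// Xor64 computes dst := dst xor src and guarantees CF and OF are cleared (and
// valid) afterwards; xor of a register with itself is the usual zeroing idiom.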
val va_code_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Xor64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xor64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ ~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\
~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\ Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\
Vale.X64.Decls.valid_of (va_get_flags va_sM) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\ ~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\
Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\ Vale.X64.Decls.valid_of (va_get_flags va_sM)
==> va_k va_sM (())))
val va_wpProof_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xor64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xor64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Xor64 dst src)) =
(va_QProc (va_code_Xor64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))
//--
//-- And64
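// And64 computes dst := dst & src (iand64); flags are modified but unspecified.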
val va_code_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_And64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_And64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k
va_sM (())))
val va_wpProof_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_And64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_And64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_And64 dst src)) =
(va_QProc (va_code_And64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_And64 dst src)
(va_wpProof_And64 dst src))
//--
//-- Shl64
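// Shl64 shifts dst left by amt (ishl64); flags are modified but unspecified.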
val va_code_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shl64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shl64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shl64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shl64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shl64 dst amt)) =
(va_QProc (va_code_Shl64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shl64 dst amt)
(va_wpProof_Shl64 dst amt))
//--
//-- Shr64
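// Shr64 shifts dst right logically by amt (ishr64); flags are modified but unspecified.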
val va_code_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shr64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shr64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shr64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shr64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shr64 dst amt)) =
(va_QProc (va_code_Shr64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shr64 dst amt)
(va_wpProof_Shr64 dst amt))
//--
//-- Cpuid_AES
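// Cpuid_AES runs CPUID with rax = 1; afterwards bit 25 of rcx (33554432) is
// nonzero iff aesni_enabled, and bit 1 (2) is nonzero iff pclmulqdq_enabled.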
val va_code_Cpuid_AES : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_AES : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_AES : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_AES ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_AES (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_AES : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_AES va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_AES () : (va_quickCode unit (va_code_Cpuid_AES ())) =
(va_QProc (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_AES va_wpProof_Cpuid_AES)
//--
//-- Cpuid_Sha
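// Cpuid_Sha runs CPUID with rax = 7, rcx = 0; bit 29 of rbx (536870912) is
// nonzero iff sha_enabled.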
val va_code_Cpuid_Sha : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sha : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sha : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sha ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 536870912 > 0 == sha_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sha (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM)
536870912 > 0 == sha_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sha : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sha va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sha () : (va_quickCode unit (va_code_Cpuid_Sha ())) =
(va_QProc (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sha va_wpProof_Cpuid_Sha)
//--
//-- Cpuid_Adx_Bmi2
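// Cpuid_Adx_Bmi2 runs CPUID with rax = 7, rcx = 0; bit 8 of rbx (256) is nonzero
// iff bmi2_enabled, and bit 19 (524288) is nonzero iff adx_enabled.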
val va_code_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Adx_Bmi2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Adx_Bmi2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 > 0 == bmi2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Adx_Bmi2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 >
0 == bmi2_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled
==> va_k va_sM (())))
val va_wpProof_Cpuid_Adx_Bmi2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Adx_Bmi2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64
rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0,
va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Adx_Bmi2 () : (va_quickCode unit (va_code_Cpuid_Adx_Bmi2 ())) =
(va_QProc (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Adx_Bmi2 va_wpProof_Cpuid_Adx_Bmi2)
//--
//-- Cpuid_Avx
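// Cpuid_Avx runs CPUID with rax = 1; bit 28 of rcx (268435456) is nonzero iff
// avx_cpuid_enabled.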
val va_code_Cpuid_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr] | false | false | Vale.X64.InsBasic.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_Cpuid_Avx: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Avx ())) | [] | Vale.X64.InsBasic.va_quick_Cpuid_Avx | {
"file_name": "obj/Vale.X64.InsBasic.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: Prims.unit -> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.X64.InsBasic.va_code_Cpuid_Avx ()) | {
"end_col": 61,
"end_line": 890,
"start_col": 2,
"start_line": 889
} |
Prims.Tot | val va_quick_Cpuid_Osxsave: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Osxsave ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_Cpuid_Osxsave () : (va_quickCode unit (va_code_Cpuid_Osxsave ())) =
(va_QProc (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Osxsave va_wpProof_Cpuid_Osxsave) | val va_quick_Cpuid_Osxsave: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Osxsave ()))
let va_quick_Cpuid_Osxsave () : (va_quickCode unit (va_code_Cpuid_Osxsave ())) = | false | null | false | (va_QProc (va_code_Cpuid_Osxsave ())
([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax])
va_wp_Cpuid_Osxsave
va_wpProof_Cpuid_Osxsave) | {
"checked_file": "Vale.X64.InsBasic.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.InsBasic.fsti"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.QuickCode.va_QProc",
"Vale.X64.InsBasic.va_code_Cpuid_Osxsave",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rRdx",
"Vale.X64.Machine_s.rRcx",
"Vale.X64.Machine_s.rRbx",
"Vale.X64.Machine_s.rRax",
"Prims.Nil",
"Vale.X64.InsBasic.va_wp_Cpuid_Osxsave",
"Vale.X64.InsBasic.va_wpProof_Cpuid_Osxsave",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.X64.InsBasic
open FStar.Mul
open Vale.Def.Types_s
open Vale.Arch.HeapImpl
open Vale.Arch.Types
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.QuickCode
unfold let vale_heap = Vale.X64.Memory.vale_heap
unfold let vale_stack = Vale.X64.Stack_i.vale_stack
open Vale.X64.CPU_Features_s
//-- Mov64
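// Mov64 copies src into dst and has no effect on flags.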
val va_code_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mov64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mov64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src /\ va_state_eq va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) . let va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in
va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mov64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mov64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Mov64 dst src)) =
(va_QProc (va_code_Mov64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Mov64 dst src)
(va_wpProof_Mov64 dst src))
//--
//-- Cmovc64
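// Cmovc64 is a conditional move: if CF is set then dst := src, otherwise dst is
// unchanged; CF must be valid on entry, and flags are not modified.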
val va_code_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Cmovc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cmovc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
Vale.X64.Decls.cf (va_get_flags va_sM) then (va_eval_dst_opr64 va_sM dst = va_eval_opr64 va_s0
src) else (va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst)) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) . let
va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_if
(Vale.X64.Decls.cf (va_get_flags va_sM)) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_opr64
va_s0 src) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst) ==> va_k va_sM
(())))
val va_wpProof_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cmovc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cmovc64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Cmovc64 dst src)) =
(va_QProc (va_code_Cmovc64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Cmovc64 dst src)
(va_wpProof_Cmovc64 dst src))
//--
//-- Add64
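// Add64 computes dst := dst + src under a no-overflow precondition
// (dst + src < pow2_64); flags are modified but unspecified.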
val va_code_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst + va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64 dst src)) =
(va_QProc (va_code_Add64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64 dst src)
(va_wpProof_Add64 dst src))
//--
//-- Add64Wrap
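// Add64Wrap computes dst := (dst + src) % pow2_64 with no precondition;
// CF records whether the unwrapped sum reached pow2_64.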
val va_code_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src >= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src >= pow2_64) ==> va_k va_sM (())))
val va_wpProof_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64Wrap dst src)) =
(va_QProc (va_code_Add64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64Wrap dst
src) (va_wpProof_Add64Wrap dst src))
//--
//-- AddLea64
val va_code_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 -> src2:va_operand_opr64
-> Tot va_code
val va_codegen_success_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> Tot va_pbool
val va_lemma_AddLea64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src1:va_operand_opr64 -> src2:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AddLea64 dst src1 src2) va_s0 /\ va_is_dst_dst_opr64
dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 <
pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 /\
va_state_eq va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\
va_get_ok va_s0 /\ Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 +
va_eval_opr64 va_s0 src2 < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 ==> va_k va_sM (())))
val va_wpProof_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AddLea64 dst src1 src2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AddLea64 dst src1 src2)
([va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64) :
(va_quickCode unit (va_code_AddLea64 dst src1 src2)) =
(va_QProc (va_code_AddLea64 dst src1 src2) ([va_mod_dst_opr64 dst]) (va_wp_AddLea64 dst src1
src2) (va_wpProof_AddLea64 dst src1 src2))
//--
//-- Adc64
val va_code_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM =
va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >= pow2_64) ==> va_k va_sM
(())))
val va_wpProof_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64 dst src)) =
(va_QProc (va_code_Adc64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64 dst src)
(va_wpProof_Adc64 dst src))
//--
//-- Adc64Wrap
val va_code_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) ==> va_k va_sM (())))
val va_wpProof_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64Wrap dst src)) =
(va_QProc (va_code_Adc64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64Wrap dst
src) (va_wpProof_Adc64Wrap dst src))
//--
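// Sketch of the intended use (assumptions noted inline): Add64Wrap on the low
// limb establishes updated_cf, which discharges the valid_cf precondition of
// Adc64Wrap on the next limb, giving the usual multi-limb addition chain:
//   va_QSeq ... (va_quick_Add64Wrap dst_lo src_lo)
//               (va_quick_Adc64Wrap dst_hi src_hi) ...
// where va_QSeq is assumed to be the sequencing combinator from
// Vale.X64.QuickCodes, and dst_lo/src_lo/dst_hi/src_hi are placeholder operands.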
//-- Adcx64Wrap
val va_code_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adcx64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adcx64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_cf
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adcx64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adcx64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adcx64Wrap dst src)) =
(va_QProc (va_code_Adcx64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adcx64Wrap
dst src) (va_wpProof_Adcx64Wrap dst src))
//--
//-- Adox64Wrap
val va_code_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adox64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adox64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_of
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_of (va_get_flags va_sM)
(va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags
va_sM) (va_get_flags va_s0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_of (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_of (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adox64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adox64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adox64Wrap dst src)) =
(va_QProc (va_code_Adox64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adox64Wrap
dst src) (va_wpProof_Adox64Wrap dst src))
//--
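// Sketch: Adcx64Wrap and Adox64Wrap form the ADX pair. Adcx reads and writes
// only cf while preserving of (maintained_of above); Adox uses only of while
// preserving cf (maintained_cf). Two independent carry chains can therefore be
// interleaved in one loop body, one per flag:
//   ... (va_quick_Adcx64Wrap d0 s0)   // chain 1, carried through cf
//   ... (va_quick_Adox64Wrap d1 s1)   // chain 2, carried through of
// (d0/s0/d1/s1 are placeholders.) Both require adx_enabled, the feature bit
// probed by Cpuid_Adx_Bmi2 later in this file.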
//-- Sub64
val va_code_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <= va_eval_dst_opr64 va_s0 dst -
va_eval_opr64 va_s0 src))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <=
va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst - va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64 dst src)) =
(va_QProc (va_code_Sub64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64 dst src)
(va_wpProof_Sub64 dst src))
//--
//-- Sub64Wrap
val va_code_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - va_eval_opr64 va_s0 src < 0) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - va_eval_opr64
va_s0 src < 0) ==> va_k va_sM (())))
val va_wpProof_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64Wrap dst src)) =
(va_QProc (va_code_Sub64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64Wrap dst
src) (va_wpProof_Sub64Wrap dst src))
//--
//-- Sbb64
val va_code_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sbb64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sbb64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0)) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - (va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else
0)) < 0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64
(va_eval_dst_opr64 va_s0 dst) (Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0))) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - (va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) < 0) ==>
va_k va_sM (())))
val va_wpProof_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sbb64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sbb64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sbb64 dst src)) =
(va_QProc (va_code_Sbb64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sbb64 dst src)
(va_wpProof_Sbb64 dst src))
//--
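// Sketch: the borrow-chain dual of Adc64Wrap. Sub64Wrap sets cf to the borrow
// (dst - src < 0), and Sbb64's valid_cf precondition then lets that borrow
// propagate into the next limb:
//   ... (va_quick_Sub64Wrap dst_lo src_lo)
//   ... (va_quick_Sbb64 dst_hi src_hi)
// (dst_lo, src_lo, dst_hi, src_hi are placeholder operands.)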
//-- Mul64Wrap
val va_code_Mul64Wrap : src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mul64Wrap : src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mul64Wrap : va_b0:va_code -> va_s0:va_state -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mul64Wrap src) va_s0 /\ va_is_src_opr64 src va_s0 /\
va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_get_reg64 rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat
(va_get_reg64 rRax va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_reg64 rRdx
va_sM (va_update_reg64 rRax va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))
[@ va_qattr]
let va_wp_Mul64Wrap (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall (va_x_efl:Vale.X64.Flags.t)
(va_x_rax:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax
va_x_rax (va_upd_flags va_x_efl va_s0)) in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_get_reg64
rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat (va_get_reg64 rRax va_s0) (va_eval_opr64
va_s0 src) ==> va_k va_sM (())))
val va_wpProof_Mul64Wrap : src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mul64Wrap src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax; va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mul64Wrap (src:va_operand_opr64) : (va_quickCode unit (va_code_Mul64Wrap src)) =
(va_QProc (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax; va_Mod_flags])
(va_wp_Mul64Wrap src) (va_wpProof_Mul64Wrap src))
//--
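// Worked reading of the postcondition: pow2_64 * rdx' + rax' == rax * src is
// the x64 `mul` contract -- rdx:rax receives the full 128-bit product of the
// implicit rax operand and src. For instance, rax = 0x8000000000000000 (2^63)
// times src = 4 is 2^65, which lands as rdx = 2, rax = 0.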
//-- Mulx64
val va_code_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mulx64 : va_b0:va_code -> va_s0:va_state -> dst_hi:va_operand_dst_opr64 ->
dst_lo:va_operand_dst_opr64 -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mulx64 dst_hi dst_lo src) va_s0 /\ va_is_dst_dst_opr64
dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok
va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64 va_sM dst_lo ==
va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst_lo va_sM (va_update_operand_dst_opr64
dst_hi va_sM va_s0)))))
[@ va_qattr]
let va_wp_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64) (src:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src
va_s0 /\ va_get_ok va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo /\ (forall
(va_x_dst_hi:va_value_dst_opr64) (va_x_dst_lo:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst_lo va_x_dst_lo (va_upd_operand_dst_opr64 dst_hi va_x_dst_hi va_s0)
in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64
va_sM dst_lo == va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) ==> va_k va_sM
(())))
val va_wpProof_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mulx64 dst_hi dst_lo src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mulx64 dst_hi dst_lo src)
([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64)
(src:va_operand_opr64) : (va_quickCode unit (va_code_Mulx64 dst_hi dst_lo src)) =
(va_QProc (va_code_Mulx64 dst_hi dst_lo src) ([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi])
(va_wp_Mulx64 dst_hi dst_lo src) (va_wpProof_Mulx64 dst_hi dst_lo src))
//--
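// Sketch: Mulx64 is the flag-free BMI2 variant -- its modifies list carries no
// va_Mod_flags, the implicit multiplicand is rdx rather than rax, and
// dst_hi =!= dst_lo keeps the two product halves from colliding. Assuming the
// operand constructors from Vale.X64.Decls:
//   let q = va_quick_Mulx64 (va_op_dst_opr64_reg64 rR9)
//             (va_op_dst_opr64_reg64 rR8) (va_op_opr64_reg64 rRbx)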
//-- IMul64
val va_code_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_IMul64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_IMul64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0
src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) < pow2_64 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k va_sM (())))
val va_wpProof_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_IMul64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_IMul64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_IMul64 dst src)) =
(va_QProc (va_code_IMul64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_IMul64 dst src)
(va_wpProof_IMul64 dst src))
//--
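// Contrast with Mul64Wrap above: IMul64 keeps only the low 64 bits of the
// product, so its precondition va_mul_nat dst src < pow2_64 insists the true
// product already fits, making the exact (non-wrapping) postcondition sound.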
//-- Xor64
val va_code_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Xor64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xor64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ ~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\
~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\ Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\
Vale.X64.Decls.valid_of (va_get_flags va_sM) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\ ~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\
Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\ Vale.X64.Decls.valid_of (va_get_flags va_sM)
==> va_k va_sM (())))
val va_wpProof_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xor64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xor64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Xor64 dst src)) =
(va_QProc (va_code_Xor64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))
//--
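// Sketch: xor-ing a register with itself is the standard zeroing idiom, and
// the postcondition above additionally clears and validates both of and cf --
// convenient for seeding an Adcx/Adox double carry chain:
//   let q = va_quick_Xor64 (va_op_dst_opr64_reg64 rRax) (va_op_opr64_reg64 rRax)
// since ixor64 x x == 0 for any 64-bit x.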
//-- And64
val va_code_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_And64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_And64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k
va_sM (())))
val va_wpProof_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_And64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_And64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_And64 dst src)) =
(va_QProc (va_code_And64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_And64 dst src)
(va_wpProof_And64 dst src))
//--
//-- Shl64
val va_code_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shl64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shl64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shl64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shl64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shl64 dst amt)) =
(va_QProc (va_code_Shl64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shl64 dst amt)
(va_wpProof_Shl64 dst amt))
//--
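// Sketch: a constant shift amount can be written with va_const_shift_amt64
// (assumed available from Vale.X64.Decls, alongside the register constructors):
//   let q = va_quick_Shl64 (va_op_dst_opr64_reg64 rRax) (va_const_shift_amt64 4)
// ishl64 shifts zeros in from the right, so this computes rax * 16 modulo
// pow2_64.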
//-- Shr64
val va_code_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shr64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shr64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shr64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shr64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shr64 dst amt)) =
(va_QProc (va_code_Shr64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shr64 dst amt)
(va_wpProof_Shr64 dst amt))
//--
//-- Cpuid_AES
val va_code_Cpuid_AES : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_AES : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_AES : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_AES ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_AES (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_AES : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_AES va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_AES () : (va_quickCode unit (va_code_Cpuid_AES ())) =
(va_QProc (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_AES va_wpProof_Cpuid_AES)
//--
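// Decoding the magic masks: each is a single CPUID feature bit written in
// decimal -- 33554432 = 2^25 is leaf-1 ECX bit 25 (AESNI) and 2 = 2^1 is ECX
// bit 1 (PCLMULQDQ). The same convention recurs in the Cpuid_* procedures
// below, e.g. 536870912 = 2^29 (SHA) and 524288 = 2^19 (ADX).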
//-- Cpuid_Sha
val va_code_Cpuid_Sha : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sha : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sha : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sha ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 536870912 > 0 == sha_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sha (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM)
536870912 > 0 == sha_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sha : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sha va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sha () : (va_quickCode unit (va_code_Cpuid_Sha ())) =
(va_QProc (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sha va_wpProof_Cpuid_Sha)
//--
//-- Cpuid_Adx_Bmi2
val va_code_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Adx_Bmi2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Adx_Bmi2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 > 0 == bmi2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Adx_Bmi2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 >
0 == bmi2_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled
==> va_k va_sM (())))
val va_wpProof_Cpuid_Adx_Bmi2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Adx_Bmi2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64
rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0,
va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Adx_Bmi2 () : (va_quickCode unit (va_code_Cpuid_Adx_Bmi2 ())) =
(va_QProc (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Adx_Bmi2 va_wpProof_Cpuid_Adx_Bmi2)
//--
//-- Cpuid_Avx
val va_code_Cpuid_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx () : (va_quickCode unit (va_code_Cpuid_Avx ())) =
(va_QProc (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx va_wpProof_Cpuid_Avx)
//--
//-- Cpuid_Avx2
val va_code_Cpuid_Avx2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0 == avx2_cpuid_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0
== avx2_cpuid_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx2 () : (va_quickCode unit (va_code_Cpuid_Avx2 ())) =
(va_QProc (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx2 va_wpProof_Cpuid_Avx2)
//--
//-- Cpuid_Sse
val va_code_Cpuid_Sse : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sse : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sse : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sse ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sse (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sse : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sse va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sse () : (va_quickCode unit (va_code_Cpuid_Sse ())) =
(va_QProc (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sse va_wpProof_Cpuid_Sse)
//--
//-- Cpuid_Movbe
val va_code_Cpuid_Movbe : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Movbe : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Movbe : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Movbe ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Movbe (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_Movbe : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Movbe va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Movbe () : (va_quickCode unit (va_code_Cpuid_Movbe ())) =
(va_QProc (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Movbe va_wpProof_Cpuid_Movbe)
//--
//-- Cpuid_Rdrand
val va_code_Cpuid_Rdrand : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Rdrand : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Rdrand : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Rdrand ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Rdrand (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Rdrand : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Rdrand va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Rdrand () : (va_quickCode unit (va_code_Cpuid_Rdrand ())) =
(va_QProc (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Rdrand va_wpProof_Cpuid_Rdrand)
//--
//-- Cpuid_Avx512
val va_code_Cpuid_Avx512 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx512 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx512 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx512 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536 > 0 == avx512f_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 == avx512dq_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 == avx512bw_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 == avx512vl_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx512 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536
> 0 == avx512f_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 ==
avx512dq_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 ==
avx512bw_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 ==
avx512vl_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx512 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx512 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx512 () : (va_quickCode unit (va_code_Cpuid_Avx512 ())) =
(va_QProc (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx512 va_wpProof_Cpuid_Avx512)
//--
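// Decoding the four AVX-512 masks (leaf-7 EBX bits): 65536 = 2^16 (AVX512F),
// 131072 = 2^17 (AVX512DQ), 1073741824 = 2^30 (AVX512BW), and
// 2147483648 = 2^31 (AVX512VL); Vale.X64.CPU_Features_s is expected to
// conjoin these four *_enabled predicates into its overall avx512 flag.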
//-- Cpuid_Osxsave
val va_code_Cpuid_Osxsave : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Osxsave : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Osxsave : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Osxsave ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Osxsave (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Osxsave : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Osxsave va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr] | false | false | Vale.X64.InsBasic.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_Cpuid_Osxsave: Prims.unit -> (va_quickCode unit (va_code_Cpuid_Osxsave ())) | [] | Vale.X64.InsBasic.va_quick_Cpuid_Osxsave | {
"file_name": "obj/Vale.X64.InsBasic.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: Prims.unit
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.X64.InsBasic.va_code_Cpuid_Osxsave ()) | {
"end_col": 69,
"end_line": 1091,
"start_col": 2,
"start_line": 1090
} |
Prims.Tot | val va_quick_NoNewline: Prims.unit -> (va_quickCode unit (va_code_NoNewline ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_NoNewline () : (va_quickCode unit (va_code_NoNewline ())) =
(va_QProc (va_code_NoNewline ()) ([]) va_wp_NoNewline va_wpProof_NoNewline) | val va_quick_NoNewline: Prims.unit -> (va_quickCode unit (va_code_NoNewline ()))
let va_quick_NoNewline () : (va_quickCode unit (va_code_NoNewline ())) = | false | null | false | (va_QProc (va_code_NoNewline ()) ([]) va_wp_NoNewline va_wpProof_NoNewline) | {
"checked_file": "Vale.X64.InsBasic.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.InsBasic.fsti"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.QuickCode.va_QProc",
"Vale.X64.InsBasic.va_code_NoNewline",
"Prims.Nil",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.InsBasic.va_wp_NoNewline",
"Vale.X64.InsBasic.va_wpProof_NoNewline",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.X64.InsBasic
open FStar.Mul
open Vale.Def.Types_s
open Vale.Arch.HeapImpl
open Vale.Arch.Types
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.QuickCode
unfold let vale_heap = Vale.X64.Memory.vale_heap
unfold let vale_stack = Vale.X64.Stack_i.vale_stack
open Vale.X64.CPU_Features_s
//-- Mov64
val va_code_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mov64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mov64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src /\ va_state_eq va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) . let va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in
va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mov64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mov64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Mov64 dst src)) =
(va_QProc (va_code_Mov64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Mov64 dst src)
(va_wpProof_Mov64 dst src))
//--
//-- Cmovc64
val va_code_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Cmovc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cmovc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
Vale.X64.Decls.cf (va_get_flags va_sM) then (va_eval_dst_opr64 va_sM dst = va_eval_opr64 va_s0
src) else (va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst)) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) . let
va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_if
(Vale.X64.Decls.cf (va_get_flags va_sM)) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_opr64
va_s0 src) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst) ==> va_k va_sM
(())))
val va_wpProof_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cmovc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cmovc64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Cmovc64 dst src)) =
(va_QProc (va_code_Cmovc64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Cmovc64 dst src)
(va_wpProof_Cmovc64 dst src))
//--
//-- Add64
val va_code_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst + va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64 dst src)) =
(va_QProc (va_code_Add64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64 dst src)
(va_wpProof_Add64 dst src))
//--
//-- Add64Wrap
val va_code_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src >= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src >= pow2_64) ==> va_k va_sM (())))
val va_wpProof_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64Wrap dst src)) =
(va_QProc (va_code_Add64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64Wrap dst
src) (va_wpProof_Add64Wrap dst src))
//--
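// Note: Add64 above requires the sum to fit (src + dst < pow2_64), while Add64Wrap drops that
// precondition and instead specifies modular arithmetic: add_wrap64 x y is x + y when
// x + y < pow2_64 and x + y - pow2_64 otherwise, i.e. (x + y) % pow2_64, with updated_cf
// recording the carry-out condition x + y >= pow2_64.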
//-- AddLea64
val va_code_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 -> src2:va_operand_opr64
-> Tot va_code
val va_codegen_success_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> Tot va_pbool
val va_lemma_AddLea64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src1:va_operand_opr64 -> src2:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AddLea64 dst src1 src2) va_s0 /\ va_is_dst_dst_opr64
dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 <
pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 /\
va_state_eq va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\
va_get_ok va_s0 /\ Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 +
va_eval_opr64 va_s0 src2 < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 ==> va_k va_sM (())))
val va_wpProof_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AddLea64 dst src1 src2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AddLea64 dst src1 src2)
([va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64) :
(va_quickCode unit (va_code_AddLea64 dst src1 src2)) =
(va_QProc (va_code_AddLea64 dst src1 src2) ([va_mod_dst_opr64 dst]) (va_wp_AddLea64 dst src1
src2) (va_wpProof_AddLea64 dst src1 src2))
//--
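// Note: unlike Add64, AddLea64 is specified with a modifies-frame of only the destination
// (no va_Mod_flags), reflecting that LEA computes the sum through address generation and
// leaves the status flags untouched.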
//-- Adc64
val va_code_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM =
va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >= pow2_64) ==> va_k va_sM
(())))
val va_wpProof_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64 dst src)) =
(va_QProc (va_code_Adc64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64 dst src)
(va_wpProof_Adc64 dst src))
//--
//-- Adc64Wrap
val va_code_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) ==> va_k va_sM (())))
val va_wpProof_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64Wrap dst src)) =
(va_QProc (va_code_Adc64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64Wrap dst
src) (va_wpProof_Adc64Wrap dst src))
//--
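// Example (a minimal sketch, not part of the generated interface): a 128-bit addition
// rdx:rax + rcx:rbx can be phrased as a quick-code carry chain, assuming the register-operand
// constructors va_op_dst_opr64_reg64 and va_op_opr64_reg64 from Vale.X64.Decls:
//
//   va_quick_Add64Wrap (va_op_dst_opr64_reg64 rRax) (va_op_opr64_reg64 rRbx);
//   va_quick_Adc64Wrap (va_op_dst_opr64_reg64 rRdx) (va_op_opr64_reg64 rRcx)
//
// The first step sets CF to the low-limb carry-out, and Adc64Wrap's valid_cf precondition
// plus updated_cf postcondition thread that carry into the high limb.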
//-- Adcx64Wrap
val va_code_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adcx64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adcx64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_cf
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adcx64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adcx64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adcx64Wrap dst src)) =
(va_QProc (va_code_Adcx64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adcx64Wrap
dst src) (va_wpProof_Adcx64Wrap dst src))
//--
//-- Adox64Wrap
val va_code_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adox64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adox64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_of
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_of (va_get_flags va_sM)
(va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags
va_sM) (va_get_flags va_s0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_of (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_of (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adox64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adox64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adox64Wrap dst src)) =
(va_QProc (va_code_Adox64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adox64Wrap
dst src) (va_wpProof_Adox64Wrap dst src))
//--
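// Note: Adcx64Wrap and Adox64Wrap mirror Adc64Wrap but use CF and OF as two independent
// carry bits (see maintained_of / maintained_cf in their postconditions), which is what lets
// ADCX- and ADOX-based multiply code interleave two carry chains without saving and restoring
// flags; both require adx_enabled.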
//-- Sub64
val va_code_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <= va_eval_dst_opr64 va_s0 dst -
va_eval_opr64 va_s0 src))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <=
va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst - va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64 dst src)) =
(va_QProc (va_code_Sub64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64 dst src)
(va_wpProof_Sub64 dst src))
//--
//-- Sub64Wrap
val va_code_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - va_eval_opr64 va_s0 src < 0) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - va_eval_opr64
va_s0 src < 0) ==> va_k va_sM (())))
val va_wpProof_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64Wrap dst src)) =
(va_QProc (va_code_Sub64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64Wrap dst
src) (va_wpProof_Sub64Wrap dst src))
//--
//-- Sbb64
val va_code_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sbb64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sbb64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0)) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - (va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else
0)) < 0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64
(va_eval_dst_opr64 va_s0 dst) (Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0))) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - (va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) < 0) ==>
va_k va_sM (())))
val va_wpProof_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sbb64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sbb64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sbb64 dst src)) =
(va_QProc (va_code_Sbb64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sbb64 dst src)
(va_wpProof_Sbb64 dst src))
//--
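// Note: Sbb64 treats CF as a borrow: the result is dst - (src + (if cf then 1 else 0)) taken
// modulo pow2_64, and updated_cf records whether that subtraction underflowed, so
// Sub64Wrap/Sbb64 form a borrow chain dual to the Add64Wrap/Adc64Wrap carry chain above.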
//-- Mul64Wrap
val va_code_Mul64Wrap : src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mul64Wrap : src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mul64Wrap : va_b0:va_code -> va_s0:va_state -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mul64Wrap src) va_s0 /\ va_is_src_opr64 src va_s0 /\
va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_get_reg64 rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat
(va_get_reg64 rRax va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_reg64 rRdx
va_sM (va_update_reg64 rRax va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))
[@ va_qattr]
let va_wp_Mul64Wrap (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall (va_x_efl:Vale.X64.Flags.t)
(va_x_rax:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax
va_x_rax (va_upd_flags va_x_efl va_s0)) in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_get_reg64
rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat (va_get_reg64 rRax va_s0) (va_eval_opr64
va_s0 src) ==> va_k va_sM (())))
val va_wpProof_Mul64Wrap : src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mul64Wrap src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax; va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mul64Wrap (src:va_operand_opr64) : (va_quickCode unit (va_code_Mul64Wrap src)) =
(va_QProc (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax; va_Mod_flags])
(va_wp_Mul64Wrap src) (va_wpProof_Mul64Wrap src))
//--
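// Note: Mul64Wrap specifies the full widening 64x64 multiply: the postcondition
// pow2_64 * rRdx + rRax == rRax0 * src says that rdx:rax holds the 128-bit product, with rax
// the low and rdx the high 64 bits.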
//-- Mulx64
val va_code_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mulx64 : va_b0:va_code -> va_s0:va_state -> dst_hi:va_operand_dst_opr64 ->
dst_lo:va_operand_dst_opr64 -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mulx64 dst_hi dst_lo src) va_s0 /\ va_is_dst_dst_opr64
dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok
va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64 va_sM dst_lo ==
va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst_lo va_sM (va_update_operand_dst_opr64
dst_hi va_sM va_s0)))))
[@ va_qattr]
let va_wp_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64) (src:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src
va_s0 /\ va_get_ok va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo /\ (forall
(va_x_dst_hi:va_value_dst_opr64) (va_x_dst_lo:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst_lo va_x_dst_lo (va_upd_operand_dst_opr64 dst_hi va_x_dst_hi va_s0)
in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64
va_sM dst_lo == va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) ==> va_k va_sM
(())))
val va_wpProof_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mulx64 dst_hi dst_lo src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mulx64 dst_hi dst_lo src)
([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64)
(src:va_operand_opr64) : (va_quickCode unit (va_code_Mulx64 dst_hi dst_lo src)) =
(va_QProc (va_code_Mulx64 dst_hi dst_lo src) ([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi])
(va_wp_Mulx64 dst_hi dst_lo src) (va_wpProof_Mulx64 dst_hi dst_lo src))
//--
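// Note: Mulx64 (BMI2) reads its implicit multiplicand from rRdx, writes the 128-bit product
// pow2_64 * dst_hi + dst_lo into two distinct destinations, and, unlike Mul64Wrap, has a
// modifies-frame without va_Mod_flags, so it can sit inside ADCX/ADOX carry chains without
// clobbering CF or OF.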
//-- IMul64
val va_code_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_IMul64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_IMul64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0
src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) < pow2_64 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k va_sM (())))
val va_wpProof_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_IMul64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_IMul64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_IMul64 dst src)) =
(va_QProc (va_code_IMul64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_IMul64 dst src)
(va_wpProof_IMul64 dst src))
//--
//-- Xor64
val va_code_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Xor64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xor64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ ~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\
~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\ Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\
Vale.X64.Decls.valid_of (va_get_flags va_sM) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\ ~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\
Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\ Vale.X64.Decls.valid_of (va_get_flags va_sM)
==> va_k va_sM (())))
val va_wpProof_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xor64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xor64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Xor64 dst src)) =
(va_QProc (va_code_Xor64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))
//--
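// Example (a minimal sketch, not part of the generated interface): the common zeroing idiom
// xor rax, rax can be phrased, assuming the operand constructors from Vale.X64.Decls, as
//
//   va_quick_Xor64 (va_op_dst_opr64_reg64 rRax) (va_op_opr64_reg64 rRax)
//
// since ixor64 x x == 0; the postcondition additionally guarantees that CF and OF are cleared
// and valid, which is why an Xor64 is a convenient way to establish valid_cf before an
// Adc64/Adc64Wrap chain.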
//-- And64
val va_code_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_And64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_And64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k
va_sM (())))
val va_wpProof_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_And64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_And64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_And64 dst src)) =
(va_QProc (va_code_And64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_And64 dst src)
(va_wpProof_And64 dst src))
//--
//-- Shl64
val va_code_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shl64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shl64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shl64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shl64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shl64 dst amt)) =
(va_QProc (va_code_Shl64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shl64 dst amt)
(va_wpProof_Shl64 dst amt))
//--
//-- Shr64
val va_code_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shr64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shr64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shr64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shr64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shr64 dst amt)) =
(va_QProc (va_code_Shr64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shr64 dst amt)
(va_wpProof_Shr64 dst amt))
//--
//-- Cpuid_AES
val va_code_Cpuid_AES : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_AES : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_AES : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_AES ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_AES (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_AES : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_AES va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_AES () : (va_quickCode unit (va_code_Cpuid_AES ())) =
(va_QProc (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_AES va_wpProof_Cpuid_AES)
//--
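// Note: the CPUID procedures below test feature bits by masking the output registers with
// powers of two via iand64; here 33554432 = 2^25 (CPUID.(EAX=1):ECX bit 25, AESNI) and
// 2 = 2^1 (ECX bit 1, PCLMULQDQ).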
//-- Cpuid_Sha
val va_code_Cpuid_Sha : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sha : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sha : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sha ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 536870912 > 0 == sha_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sha (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM)
536870912 > 0 == sha_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sha : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sha va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sha () : (va_quickCode unit (va_code_Cpuid_Sha ())) =
(va_QProc (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sha va_wpProof_Cpuid_Sha)
//--
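// Note: 536870912 = 2^29, i.e. CPUID.(EAX=7,ECX=0):EBX bit 29 (SHA).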
//-- Cpuid_Adx_Bmi2
val va_code_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Adx_Bmi2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Adx_Bmi2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 > 0 == bmi2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Adx_Bmi2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 >
0 == bmi2_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled
==> va_k va_sM (())))
val va_wpProof_Cpuid_Adx_Bmi2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Adx_Bmi2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64
rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0,
va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Adx_Bmi2 () : (va_quickCode unit (va_code_Cpuid_Adx_Bmi2 ())) =
(va_QProc (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Adx_Bmi2 va_wpProof_Cpuid_Adx_Bmi2)
//--
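// Note: 256 = 2^8 (EBX bit 8, BMI2) and 524288 = 2^19 (EBX bit 19, ADX) in the
// CPUID.(EAX=7,ECX=0) leaf.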
//-- Cpuid_Avx
val va_code_Cpuid_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx () : (va_quickCode unit (va_code_Cpuid_Avx ())) =
(va_QProc (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx va_wpProof_Cpuid_Avx)
//--
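// Note: 268435456 = 2^28, i.e. CPUID.(EAX=1):ECX bit 28 (AVX).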
//-- Cpuid_Avx2
val va_code_Cpuid_Avx2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0 == avx2_cpuid_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0
== avx2_cpuid_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx2 () : (va_quickCode unit (va_code_Cpuid_Avx2 ())) =
(va_QProc (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx2 va_wpProof_Cpuid_Avx2)
//--
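// Note: 32 = 2^5, i.e. CPUID.(EAX=7,ECX=0):EBX bit 5 (AVX2).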
//-- Cpuid_Sse
val va_code_Cpuid_Sse : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sse : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sse : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sse ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sse (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sse : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sse va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sse () : (va_quickCode unit (va_code_Cpuid_Sse ())) =
(va_QProc (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sse va_wpProof_Cpuid_Sse)
//--
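// Note: 67108864 = 2^26 (EDX bit 26, SSE2), 524288 = 2^19 (ECX bit 19, SSE4.1), and
// 512 = 2^9 (ECX bit 9, SSSE3) in the CPUID.(EAX=1) leaf.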
//-- Cpuid_Movbe
val va_code_Cpuid_Movbe : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Movbe : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Movbe : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Movbe ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Movbe (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_Movbe : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Movbe va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Movbe () : (va_quickCode unit (va_code_Cpuid_Movbe ())) =
(va_QProc (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Movbe va_wpProof_Cpuid_Movbe)
//--
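// Note: 4194304 = 2^22, i.e. CPUID.(EAX=1):ECX bit 22 (MOVBE).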
//-- Cpuid_Rdrand
val va_code_Cpuid_Rdrand : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Rdrand : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Rdrand : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Rdrand ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Rdrand (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Rdrand : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Rdrand va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Rdrand () : (va_quickCode unit (va_code_Cpuid_Rdrand ())) =
(va_QProc (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Rdrand va_wpProof_Cpuid_Rdrand)
//--
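// Note: 1073741824 = 2^30, i.e. CPUID.(EAX=1):ECX bit 30 (RDRAND).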
//-- Cpuid_Avx512
val va_code_Cpuid_Avx512 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx512 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx512 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx512 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536 > 0 == avx512f_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 == avx512dq_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 == avx512bw_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 == avx512vl_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx512 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536
> 0 == avx512f_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 ==
avx512dq_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 ==
avx512bw_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 ==
avx512vl_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx512 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx512 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx512 () : (va_quickCode unit (va_code_Cpuid_Avx512 ())) =
(va_QProc (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx512 va_wpProof_Cpuid_Avx512)
//--
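// Note: for CPUID leaf EAX=7, subleaf ECX=0 (as the precondition requires), the masks 65536,
// 131072, 1073741824, and 2147483648 are EBX bits 16, 17, 30, and 31: the AVX512F, AVX512DQ,
// AVX512BW, and AVX512VL feature flags, respectively.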
//-- Cpuid_Osxsave
val va_code_Cpuid_Osxsave : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Osxsave : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Osxsave : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Osxsave ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Osxsave (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Osxsave : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Osxsave va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Osxsave () : (va_quickCode unit (va_code_Cpuid_Osxsave ())) =
(va_QProc (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Osxsave va_wpProof_Cpuid_Osxsave)
//--
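// Note: the mask 134217728 is bit 27 (0x8000000) of ECX for CPUID leaf EAX=1, the OSXSAVE
// flag; it must be set before XGETBV (below) can be used to inspect XCR0.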
//-- Xgetbv_Avx
val va_code_Xgetbv_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Xgetbv_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Xgetbv_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xgetbv_Avx ()) va_s0 /\ va_get_ok va_s0 /\
osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 2 > 0 == sse_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 4 > 0 == avx_xcr0_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xgetbv_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall (va_x_rax:nat64)
(va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax va_x_rax va_s0) in
va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 2 > 0 == sse_xcr0_enabled
/\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 4 > 0 == avx_xcr0_enabled ==> va_k va_sM
(())))
val va_wpProof_Xgetbv_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xgetbv_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xgetbv_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xgetbv_Avx () : (va_quickCode unit (va_code_Xgetbv_Avx ())) =
(va_QProc (va_code_Xgetbv_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax]) va_wp_Xgetbv_Avx
va_wpProof_Xgetbv_Avx)
//--
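// Note: XGETBV with ECX=0 reads XCR0 into EDX:EAX; the masks 2 and 4 are XCR0 bits 1 (SSE
// state) and 2 (AVX state), matching sse_xcr0_enabled and avx_xcr0_enabled.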
//-- Xgetbv_Avx512
val va_code_Xgetbv_Avx512 : va_dummy:unit -> Tot va_code
val va_codegen_success_Xgetbv_Avx512 : va_dummy:unit -> Tot va_pbool
val va_lemma_Xgetbv_Avx512 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xgetbv_Avx512 ()) va_s0 /\ va_get_ok va_s0 /\
osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 32 > 0 == opmask_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 64 > 0 == zmm_hi256_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 128 > 0 == hi16_zmm_xcr0_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM
va_s0)))))
[@ va_qattr]
let va_wp_Xgetbv_Avx512 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall (va_x_rax:nat64)
(va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax va_x_rax va_s0) in
va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 32 > 0 ==
opmask_xcr0_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 64 > 0 ==
zmm_hi256_xcr0_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 128 > 0 ==
hi16_zmm_xcr0_enabled ==> va_k va_sM (())))
val va_wpProof_Xgetbv_Avx512 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xgetbv_Avx512 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xgetbv_Avx512 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xgetbv_Avx512 () : (va_quickCode unit (va_code_Xgetbv_Avx512 ())) =
(va_QProc (va_code_Xgetbv_Avx512 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax]) va_wp_Xgetbv_Avx512
va_wpProof_Xgetbv_Avx512)
//--
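// Note: the masks 32, 64, and 128 are XCR0 bits 5, 6, and 7 (opmask, ZMM_Hi256, and Hi16_ZMM
// state), the three state components the OS must enable for AVX-512.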
//-- Nat64Equal
val va_code_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> Tot va_code
val va_codegen_success_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> Tot
va_pbool
val va_lemma_Nat64Equal : va_b0:va_code -> va_s0:va_state -> dst:va_operand_reg_opr64 ->
src:va_operand_reg_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Nat64Equal dst src) va_s0 /\ va_is_dst_reg_opr64 dst
va_s0 /\ va_is_dst_reg_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
(va_eval_reg_opr64 va_s0 src = 18446744073709551615) then (va_eval_reg_opr64 va_sM dst = 0)
else (va_eval_reg_opr64 va_sM dst = 1)) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_reg_opr64 src va_sM (va_update_operand_reg_opr64 dst
va_sM va_s0))))))
[@ va_qattr]
let va_wp_Nat64Equal (dst:va_operand_reg_opr64) (src:va_operand_reg_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_reg_opr64 dst va_s0 /\ va_is_dst_reg_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_reg_opr64) (va_x_src:va_value_reg_opr64) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_operand_reg_opr64 src va_x_src (va_upd_operand_reg_opr64
dst va_x_dst va_s0)) in va_get_ok va_sM /\ va_if (va_eval_reg_opr64 va_s0 src =
18446744073709551615) (fun _ -> va_eval_reg_opr64 va_sM dst = 0) (fun _ -> va_eval_reg_opr64
va_sM dst = 1) ==> va_k va_sM (())))
val va_wpProof_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> va_s0:va_state
-> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Nat64Equal dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Nat64Equal dst src) ([va_Mod_flags;
va_mod_reg_opr64 src; va_mod_reg_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Nat64Equal (dst:va_operand_reg_opr64) (src:va_operand_reg_opr64) : (va_quickCode unit
(va_code_Nat64Equal dst src)) =
(va_QProc (va_code_Nat64Equal dst src) ([va_Mod_flags; va_mod_reg_opr64 src; va_mod_reg_opr64
dst]) (va_wp_Nat64Equal dst src) (va_wpProof_Nat64Equal dst src))
//--
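// Note: 18446744073709551615 is pow2_64 - 1 (the all-ones word), so dst is set to 0 exactly
// when src is all ones and to 1 otherwise; src is declared as a destination operand because
// the implementation clobbers it (it appears in the modifies list above).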
//-- Comment
val va_code_Comment : c:string -> Tot va_code
val va_codegen_success_Comment : c:string -> Tot va_pbool
val va_lemma_Comment : va_b0:va_code -> va_s0:va_state -> c:string
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Comment c) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_Comment (c:string) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_Comment : c:string -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Comment c va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Comment c) ([]) va_s0 va_k ((va_sM,
va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Comment (c:string) : (va_quickCode unit (va_code_Comment c)) =
(va_QProc (va_code_Comment c) ([]) (va_wp_Comment c) (va_wpProof_Comment c))
//--
//-- LargeComment
val va_code_LargeComment : c:string -> Tot va_code
val va_codegen_success_LargeComment : c:string -> Tot va_pbool
val va_lemma_LargeComment : va_b0:va_code -> va_s0:va_state -> c:string
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_LargeComment c) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_LargeComment (c:string) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_LargeComment : c:string -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_LargeComment c va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_LargeComment c) ([]) va_s0 va_k
((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_LargeComment (c:string) : (va_quickCode unit (va_code_LargeComment c)) =
(va_QProc (va_code_LargeComment c) ([]) (va_wp_LargeComment c) (va_wpProof_LargeComment c))
//--
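// Note: Comment and LargeComment (like NoNewline below) are assembly-printer directives only;
// their postconditions constrain nothing but ok, so they have no effect on machine state.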
//-- NoNewline
val va_code_NoNewline : va_dummy:unit -> Tot va_code
val va_codegen_success_NoNewline : va_dummy:unit -> Tot va_pbool
val va_lemma_NoNewline : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_NoNewline ()) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_NoNewline (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_NoNewline : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_NoNewline va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_NoNewline ()) ([]) va_s0 va_k ((va_sM,
va_f0, va_g))))
[@ "opaque_to_smt" va_qattr] | false | false | Vale.X64.InsBasic.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_NoNewline: Prims.unit -> (va_quickCode unit (va_code_NoNewline ())) | [] | Vale.X64.InsBasic.va_quick_NoNewline | {
"file_name": "obj/Vale.X64.InsBasic.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: Prims.unit -> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.X64.InsBasic.va_code_NoNewline ()) | {
"end_col": 77,
"end_line": 1269,
"start_col": 2,
"start_line": 1269
} |
Prims.Tot | val va_quick_Newline: Prims.unit -> (va_quickCode unit (va_code_Newline ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_Newline () : (va_quickCode unit (va_code_Newline ())) =
(va_QProc (va_code_Newline ()) ([]) va_wp_Newline va_wpProof_Newline) | val va_quick_Newline: Prims.unit -> (va_quickCode unit (va_code_Newline ()))
let va_quick_Newline () : (va_quickCode unit (va_code_Newline ())) = | false | null | false | (va_QProc (va_code_Newline ()) ([]) va_wp_Newline va_wpProof_Newline) | {
"checked_file": "Vale.X64.InsBasic.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.InsBasic.fsti"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.QuickCode.va_QProc",
"Vale.X64.InsBasic.va_code_Newline",
"Prims.Nil",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.InsBasic.va_wp_Newline",
"Vale.X64.InsBasic.va_wpProof_Newline",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.X64.InsBasic
open FStar.Mul
open Vale.Def.Types_s
open Vale.Arch.HeapImpl
open Vale.Arch.Types
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.QuickCode
unfold let vale_heap = Vale.X64.Memory.vale_heap
unfold let vale_stack = Vale.X64.Stack_i.vale_stack
open Vale.X64.CPU_Features_s
//-- Mov64
val va_code_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mov64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mov64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src /\ va_state_eq va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) . let va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in
va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Mov64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mov64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mov64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mov64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Mov64 dst src)) =
(va_QProc (va_code_Mov64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Mov64 dst src)
(va_wpProof_Mov64 dst src))
//--
//-- Cmovc64
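// Each instruction in this interface follows the same five-part pattern seen above for Mov64:
// va_code_* builds the code AST, va_codegen_success_* reports whether code generation succeeds
// for the given operands, va_lemma_* is the Hoare-style correctness lemma, va_wp_* is the
// weakest precondition, and va_quick_* packages code, frame (the modified-locations list), wp,
// and proof into a va_QProc usable by the quick-code framework.
(* Usage sketch (hypothetical call site, for illustration only): a Vale-level call such as
   `Mov64(rax, rbx);` elaborates to the quick-code term

     va_quick_Mov64 (va_op_dst_opr64_reg64 rRax) (va_op_opr64_reg64 rRbx)

   which is then sequenced with other va_quickCode values by the combinators in
   Vale.X64.QuickCodes. *)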
val va_code_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Cmovc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cmovc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
Vale.X64.Decls.cf (va_get_flags va_sM) then (va_eval_dst_opr64 va_sM dst = va_eval_opr64 va_s0
src) else (va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst)) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) . let
va_sM = va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_if
(Vale.X64.Decls.cf (va_get_flags va_sM)) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_opr64
va_s0 src) (fun _ -> va_eval_dst_opr64 va_sM dst = va_eval_dst_opr64 va_s0 dst) ==> va_k va_sM
(())))
val va_wpProof_Cmovc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cmovc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cmovc64 dst src) ([va_mod_dst_opr64
dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cmovc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Cmovc64 dst src)) =
(va_QProc (va_code_Cmovc64 dst src) ([va_mod_dst_opr64 dst]) (va_wp_Cmovc64 dst src)
(va_wpProof_Cmovc64 dst src))
//--
//-- Add64
val va_code_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst + va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Add64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64 dst src)) =
(va_QProc (va_code_Add64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64 dst src)
(va_wpProof_Add64 dst src))
//--
//-- Add64Wrap
val va_code_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Add64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Add64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src >= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src >= pow2_64) ==> va_k va_sM (())))
val va_wpProof_Add64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Add64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Add64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Add64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Add64Wrap dst src)) =
(va_QProc (va_code_Add64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Add64Wrap dst
src) (va_wpProof_Add64Wrap dst src))
//--
//-- AddLea64
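// Note the contrast with Add64 above: Add64 demands the no-overflow precondition
// va_eval_opr64 src + va_eval_dst_opr64 dst < pow2_64 and leaves the flags unspecified,
// whereas Add64Wrap permits wraparound (add_wrap64) and instead exposes the carry-out through
// updated_cf.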
val va_code_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 -> src2:va_operand_opr64
-> Tot va_code
val va_codegen_success_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> Tot va_pbool
val va_lemma_AddLea64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src1:va_operand_opr64 -> src2:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AddLea64 dst src1 src2) va_s0 /\ va_is_dst_dst_opr64
dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 <
pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 /\
va_state_eq va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0))))
[@ va_qattr]
let va_wp_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src1 va_s0 /\ va_is_src_opr64 src2 va_s0 /\
va_get_ok va_s0 /\ Vale.X64.Decls.max_one_mem src1 src2 /\ va_eval_opr64 va_s0 src1 +
va_eval_opr64 va_s0 src2 < pow2_64 /\ (forall (va_x_dst:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst va_x_dst va_s0 in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_eval_opr64 va_s0 src1 + va_eval_opr64 va_s0 src2 ==> va_k va_sM (())))
val va_wpProof_AddLea64 : dst:va_operand_dst_opr64 -> src1:va_operand_opr64 ->
src2:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AddLea64 dst src1 src2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AddLea64 dst src1 src2)
([va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AddLea64 (dst:va_operand_dst_opr64) (src1:va_operand_opr64) (src2:va_operand_opr64) :
(va_quickCode unit (va_code_AddLea64 dst src1 src2)) =
(va_QProc (va_code_AddLea64 dst src1 src2) ([va_mod_dst_opr64 dst]) (va_wp_AddLea64 dst src1
src2) (va_wpProof_AddLea64 dst src1 src2))
//--
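// Note: AddLea64 is the LEA form of addition; its postcondition never mentions
// va_update_flags, so unlike Add64 it leaves the flags untouched, at the cost of the
// max_one_mem and no-overflow side conditions.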
//-- Adc64
val va_code_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64 va_s0 src + va_eval_dst_opr64
va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if
Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_eval_opr64
va_s0 src + va_eval_dst_opr64 va_s0 dst + 1 < pow2_64 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0) /\ (forall (va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM =
va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) /\ Vale.X64.Decls.updated_cf
(va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >= pow2_64) ==> va_k va_sM
(())))
val va_wpProof_Adc64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64 dst src)) =
(va_QProc (va_code_Adc64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64 dst src)
(va_wpProof_Adc64 dst src))
//--
//-- Adc64Wrap
val va_code_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adc64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adc64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) ==> va_k va_sM (())))
val va_wpProof_Adc64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adc64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adc64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adc64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adc64Wrap dst src)) =
(va_QProc (va_code_Adc64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adc64Wrap dst
src) (va_wpProof_Adc64Wrap dst src))
//--
//-- Adcx64Wrap
val va_code_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adcx64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adcx64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_cf
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else 0)
>= pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_of (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adcx64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adcx64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adcx64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adcx64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adcx64Wrap dst src)) =
(va_QProc (va_code_Adcx64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adcx64Wrap
dst src) (va_wpProof_Adcx64Wrap dst src))
//--
//-- Adox64Wrap
val va_code_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Adox64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Adox64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\ Vale.X64.Decls.valid_of
(va_get_flags va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64 (Vale.Arch.Types.add_wrap64
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) /\ Vale.X64.Decls.updated_of (va_get_flags va_sM)
(va_eval_dst_opr64 va_s0 dst + va_eval_opr64 va_s0 src + (if Vale.X64.Decls.overflow
(va_get_flags va_s0) then 1 else 0) >= pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags
va_sM) (va_get_flags va_s0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ adx_enabled /\
Vale.X64.Decls.valid_of (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.add_wrap64
(Vale.Arch.Types.add_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src)) (va_if
(Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) /\
Vale.X64.Decls.updated_of (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst + va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.overflow (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0) >=
pow2_64) /\ Vale.X64.Decls.maintained_cf (va_get_flags va_sM) (va_get_flags va_s0) ==> va_k
va_sM (())))
val va_wpProof_Adox64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Adox64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Adox64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Adox64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Adox64Wrap dst src)) =
(va_QProc (va_code_Adox64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Adox64Wrap
dst src) (va_wpProof_Adox64Wrap dst src))
//--
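// Note: Adcx64Wrap and Adox64Wrap are the ADX pair (both require adx_enabled): ADCX consumes
// and produces only CF while maintained_of keeps OF intact, and ADOX consumes and produces
// only OF while maintained_cf keeps CF intact. This is what allows two independent carry
// chains to be interleaved in multi-precision multiplication.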
//-- Sub64
val va_code_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <= va_eval_dst_opr64 va_s0 dst -
va_eval_opr64 va_s0 src))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\
va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM (va_update_operand_dst_opr64 dst
va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ 0 <=
va_eval_dst_opr64 va_s0 dst - va_eval_opr64 va_s0 src /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == va_eval_dst_opr64 va_s0
dst - va_eval_opr64 va_s0 src ==> va_k va_sM (())))
val va_wpProof_Sub64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64 dst src)) =
(va_QProc (va_code_Sub64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64 dst src)
(va_wpProof_Sub64 dst src))
//--
//-- Sub64Wrap
val va_code_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sub64Wrap : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sub64Wrap dst src) va_s0 /\ va_is_dst_dst_opr64 dst
va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - va_eval_opr64 va_s0 src < 0) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - va_eval_opr64
va_s0 src < 0) ==> va_k va_sM (())))
val va_wpProof_Sub64Wrap : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sub64Wrap dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sub64Wrap dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sub64Wrap (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sub64Wrap dst src)) =
(va_QProc (va_code_Sub64Wrap dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sub64Wrap dst
src) (va_wpProof_Sub64Wrap dst src))
//--
//-- Sbb64
val va_code_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Sbb64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Sbb64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ Vale.X64.Decls.valid_cf (va_get_flags
va_s0)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64 (va_eval_dst_opr64 va_s0 dst)
(Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (if Vale.X64.Decls.cf (va_get_flags
va_s0) then 1 else 0)) /\ Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64
va_s0 dst - (va_eval_opr64 va_s0 src + (if Vale.X64.Decls.cf (va_get_flags va_s0) then 1 else
0)) < 0) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\
Vale.X64.Decls.valid_cf (va_get_flags va_s0) /\ (forall (va_x_dst:va_value_dst_opr64)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_operand_dst_opr64 dst
va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.sub_wrap64
(va_eval_dst_opr64 va_s0 dst) (Vale.Arch.Types.add_wrap64 (va_eval_opr64 va_s0 src) (va_if
(Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0))) /\
Vale.X64.Decls.updated_cf (va_get_flags va_sM) (va_eval_dst_opr64 va_s0 dst - (va_eval_opr64
va_s0 src + va_if (Vale.X64.Decls.cf (va_get_flags va_s0)) (fun _ -> 1) (fun _ -> 0)) < 0) ==>
va_k va_sM (())))
val va_wpProof_Sbb64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Sbb64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Sbb64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Sbb64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Sbb64 dst src)) =
(va_QProc (va_code_Sbb64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Sbb64 dst src)
(va_wpProof_Sbb64 dst src))
//--
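// Note: Sbb64 is subtract-with-borrow: dst := dst - (src + CF) with wraparound, and
// updated_cf records whether a borrow occurred, i.e. whether the mathematical difference is
// negative.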
//-- Mul64Wrap
val va_code_Mul64Wrap : src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mul64Wrap : src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mul64Wrap : va_b0:va_code -> va_s0:va_state -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mul64Wrap src) va_s0 /\ va_is_src_opr64 src va_s0 /\
va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_get_reg64 rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat
(va_get_reg64 rRax va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_reg64 rRdx
va_sM (va_update_reg64 rRax va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))
[@ va_qattr]
let va_wp_Mul64Wrap (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall (va_x_efl:Vale.X64.Flags.t)
(va_x_rax:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax
va_x_rax (va_upd_flags va_x_efl va_s0)) in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_get_reg64
rRdx va_sM) + va_get_reg64 rRax va_sM == va_mul_nat (va_get_reg64 rRax va_s0) (va_eval_opr64
va_s0 src) ==> va_k va_sM (())))
val va_wpProof_Mul64Wrap : src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mul64Wrap src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax; va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mul64Wrap (src:va_operand_opr64) : (va_quickCode unit (va_code_Mul64Wrap src)) =
(va_QProc (va_code_Mul64Wrap src) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax; va_Mod_flags])
(va_wp_Mul64Wrap src) (va_wpProof_Mul64Wrap src))
//--
//-- Mulx64
val va_code_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Mulx64 : va_b0:va_code -> va_s0:va_state -> dst_hi:va_operand_dst_opr64 ->
dst_lo:va_operand_dst_opr64 -> src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Mulx64 dst_hi dst_lo src) va_s0 /\ va_is_dst_dst_opr64
dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok
va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64 va_sM dst_lo ==
va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) /\ va_state_eq va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst_lo va_sM (va_update_operand_dst_opr64
dst_hi va_sM va_s0)))))
[@ va_qattr]
let va_wp_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64) (src:va_operand_opr64)
(va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst_hi va_s0 /\ va_is_dst_dst_opr64 dst_lo va_s0 /\ va_is_src_opr64 src
va_s0 /\ va_get_ok va_s0 /\ bmi2_enabled /\ dst_hi =!= dst_lo /\ (forall
(va_x_dst_hi:va_value_dst_opr64) (va_x_dst_lo:va_value_dst_opr64) . let va_sM =
va_upd_operand_dst_opr64 dst_lo va_x_dst_lo (va_upd_operand_dst_opr64 dst_hi va_x_dst_hi va_s0)
in va_get_ok va_sM /\ va_mul_nat pow2_64 (va_eval_dst_opr64 va_sM dst_hi) + va_eval_dst_opr64
va_sM dst_lo == va_mul_nat (va_get_reg64 rRdx va_s0) (va_eval_opr64 va_s0 src) ==> va_k va_sM
(())))
val va_wpProof_Mulx64 : dst_hi:va_operand_dst_opr64 -> dst_lo:va_operand_dst_opr64 ->
src:va_operand_opr64 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Mulx64 dst_hi dst_lo src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Mulx64 dst_hi dst_lo src)
([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Mulx64 (dst_hi:va_operand_dst_opr64) (dst_lo:va_operand_dst_opr64)
(src:va_operand_opr64) : (va_quickCode unit (va_code_Mulx64 dst_hi dst_lo src)) =
(va_QProc (va_code_Mulx64 dst_hi dst_lo src) ([va_mod_dst_opr64 dst_lo; va_mod_dst_opr64 dst_hi])
(va_wp_Mulx64 dst_hi dst_lo src) (va_wpProof_Mulx64 dst_hi dst_lo src))
//--
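// Note: Mul64Wrap is the one-operand MUL, producing the full 128-bit product
// RDX:RAX = RAX * src and leaving the flags unspecified; Mulx64 (BMI2) instead writes the
// product of RDX and src into caller-chosen dst_hi/dst_lo, requires dst_hi =!= dst_lo, and,
// since its postcondition never mentions the flags, preserves them.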
//-- IMul64
val va_code_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_IMul64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_IMul64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) < pow2_64))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0
src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ va_mul_nat
(va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) < pow2_64 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== va_mul_nat (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k va_sM (())))
val va_wpProof_IMul64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_IMul64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_IMul64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_IMul64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_IMul64 dst src)) =
(va_QProc (va_code_IMul64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_IMul64 dst src)
(va_wpProof_IMul64 dst src))
//--
//-- Xor64
val va_code_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_Xor64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xor64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ ~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\
~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\ Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\
Vale.X64.Decls.valid_of (va_get_flags va_sM) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ixor64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) /\
~(Vale.X64.Decls.overflow (va_get_flags va_sM)) /\ ~(Vale.X64.Decls.cf (va_get_flags va_sM)) /\
Vale.X64.Decls.valid_cf (va_get_flags va_sM) /\ Vale.X64.Decls.valid_of (va_get_flags va_sM)
==> va_k va_sM (())))
val va_wpProof_Xor64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xor64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xor64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xor64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_Xor64 dst src)) =
(va_QProc (va_code_Xor64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Xor64 dst src)
(va_wpProof_Xor64 dst src))
//--
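// Note: unlike And64 below (whose flags are left unspecified), Xor64 pins the flags down:
// CF and OF are guaranteed clear and valid afterwards. This makes `Xor64(r, r)` the idiomatic
// way to zero a register while establishing a known carry state for a subsequent
// Adc64/Adcx64Wrap chain.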
//-- And64
val va_code_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_code
val va_codegen_success_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> Tot va_pbool
val va_lemma_And64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
src:va_operand_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_And64 dst src) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_opr64 va_s0 src) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) (va_s0:va_state) (va_k:(va_state
-> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.iand64 (va_eval_dst_opr64 va_s0 dst) (va_eval_opr64 va_s0 src) ==> va_k
va_sM (())))
val va_wpProof_And64 : dst:va_operand_dst_opr64 -> src:va_operand_opr64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_And64 dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_And64 dst src) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_And64 (dst:va_operand_dst_opr64) (src:va_operand_opr64) : (va_quickCode unit
(va_code_And64 dst src)) =
(va_QProc (va_code_And64 dst src) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_And64 dst src)
(va_wpProof_And64 dst src))
//--
//-- Shl64
val va_code_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shl64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shl64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishl64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shl64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shl64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shl64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shl64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shl64 dst amt)) =
(va_QProc (va_code_Shl64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shl64 dst amt)
(va_wpProof_Shl64 dst amt))
//--
//-- Shr64
val va_code_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot va_code
val va_codegen_success_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> Tot
va_pbool
val va_lemma_Shr64 : va_b0:va_code -> va_s0:va_state -> dst:va_operand_dst_opr64 ->
amt:va_operand_shift_amt64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Shr64 dst amt) va_s0 /\ va_is_dst_dst_opr64 dst va_s0
/\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_eval_dst_opr64 va_sM dst == Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst)
(va_eval_shift_amt64 va_s0 amt) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_ok va_sM
(va_update_operand_dst_opr64 dst va_sM va_s0)))))
[@ va_qattr]
let va_wp_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_dst_opr64 dst va_s0 /\ va_is_src_shift_amt64 amt va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_dst_opr64) (va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl
(va_upd_operand_dst_opr64 dst va_x_dst va_s0) in va_get_ok va_sM /\ va_eval_dst_opr64 va_sM dst
== Vale.Arch.Types.ishr64 (va_eval_dst_opr64 va_s0 dst) (va_eval_shift_amt64 va_s0 amt) ==>
va_k va_sM (())))
val va_wpProof_Shr64 : dst:va_operand_dst_opr64 -> amt:va_operand_shift_amt64 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Shr64 dst amt va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Shr64 dst amt) ([va_Mod_flags;
va_mod_dst_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Shr64 (dst:va_operand_dst_opr64) (amt:va_operand_shift_amt64) : (va_quickCode unit
(va_code_Shr64 dst amt)) =
(va_QProc (va_code_Shr64 dst amt) ([va_Mod_flags; va_mod_dst_opr64 dst]) (va_wp_Shr64 dst amt)
(va_wpProof_Shr64 dst amt))
//--
//-- Cpuid_AES
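// Note on the masks below: 33554432 == 2^25 and 2 == 2^1, the AESNI and
// PCLMULQDQ feature bits of CPUID.(EAX=1):ECX (hence the rRax = 1 precondition).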
val va_code_Cpuid_AES : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_AES : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_AES : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_AES ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_AES (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 33554432 > 0 == aesni_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 2 > 0 == pclmulqdq_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_AES : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_AES va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_AES () : (va_quickCode unit (va_code_Cpuid_AES ())) =
(va_QProc (va_code_Cpuid_AES ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_AES va_wpProof_Cpuid_AES)
//--
//-- Cpuid_Sha
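// Note: 536870912 == 2^29, the SHA-extensions bit of CPUID.(EAX=7,ECX=0):EBX.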
val va_code_Cpuid_Sha : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sha : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sha : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sha ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 536870912 > 0 == sha_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sha (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM)
536870912 > 0 == sha_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sha : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sha va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sha () : (va_quickCode unit (va_code_Cpuid_Sha ())) =
(va_QProc (va_code_Cpuid_Sha ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sha va_wpProof_Cpuid_Sha)
//--
//-- Cpuid_Adx_Bmi2
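// Note: 256 == 2^8 (BMI2) and 524288 == 2^19 (ADX) in CPUID.(EAX=7,ECX=0):EBX.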
val va_code_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Adx_Bmi2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Adx_Bmi2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Adx_Bmi2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 > 0 == bmi2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Adx_Bmi2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 256 >
0 == bmi2_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 524288 > 0 == adx_enabled
==> va_k va_sM (())))
val va_wpProof_Cpuid_Adx_Bmi2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Adx_Bmi2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64
rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0,
va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Adx_Bmi2 () : (va_quickCode unit (va_code_Cpuid_Adx_Bmi2 ())) =
(va_QProc (va_code_Cpuid_Adx_Bmi2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Adx_Bmi2 va_wpProof_Cpuid_Adx_Bmi2)
//--
//-- Cpuid_Avx
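// Note: 268435456 == 2^28, the AVX bit of CPUID.(EAX=1):ECX.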
val va_code_Cpuid_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 268435456 > 0 == avx_cpuid_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx () : (va_quickCode unit (va_code_Cpuid_Avx ())) =
(va_QProc (va_code_Cpuid_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx va_wpProof_Cpuid_Avx)
//--
//-- Cpuid_Avx2
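// Note: 32 == 2^5, the AVX2 bit of CPUID.(EAX=7,ECX=0):EBX.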
val va_code_Cpuid_Avx2 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx2 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx2 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx2 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0 == avx2_cpuid_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx2 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 32 > 0
== avx2_cpuid_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx2 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx2 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx2 () : (va_quickCode unit (va_code_Cpuid_Avx2 ())) =
(va_QProc (va_code_Cpuid_Avx2 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx2 va_wpProof_Cpuid_Avx2)
//--
//-- Cpuid_Sse
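// Note: 67108864 == 2^26 (SSE2, in EDX), 524288 == 2^19 (SSE4.1, in ECX), and
// 512 == 2^9 (SSSE3, in ECX), all in CPUID leaf 1.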
val va_code_Cpuid_Sse : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Sse : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Sse : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Sse ()) va_s0 /\ va_get_ok va_s0 /\ va_get_reg64
rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Sse (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRdx va_sM) 67108864 > 0 == sse2_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 524288 > 0 == sse4_1_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 512 > 0 == ssse3_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Sse : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Sse va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Sse () : (va_quickCode unit (va_code_Cpuid_Sse ())) =
(va_QProc (va_code_Cpuid_Sse ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Sse va_wpProof_Cpuid_Sse)
//--
//-- Cpuid_Movbe
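// Note: 4194304 == 2^22, the MOVBE bit of CPUID.(EAX=1):ECX.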
val va_code_Cpuid_Movbe : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Movbe : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Movbe : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Movbe ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled /\ va_state_eq
va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx va_sM
(va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Movbe (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 4194304 > 0 == movbe_enabled ==> va_k va_sM
(())))
val va_wpProof_Cpuid_Movbe : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Movbe va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Movbe () : (va_quickCode unit (va_code_Cpuid_Movbe ())) =
(va_QProc (va_code_Cpuid_Movbe ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Movbe va_wpProof_Cpuid_Movbe)
//--
//-- Cpuid_Rdrand
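// Note: 1073741824 == 2^30, the RDRAND bit of CPUID.(EAX=1):ECX.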
val va_code_Cpuid_Rdrand : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Rdrand : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Rdrand : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Rdrand ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Rdrand (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 1073741824 > 0 == rdrand_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Rdrand : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Rdrand va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Rdrand () : (va_quickCode unit (va_code_Cpuid_Rdrand ())) =
(va_QProc (va_code_Cpuid_Rdrand ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Rdrand va_wpProof_Cpuid_Rdrand)
//--
//-- Cpuid_Avx512
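// Note: in CPUID.(EAX=7,ECX=0):EBX, 65536 == 2^16 (AVX512F), 131072 == 2^17
// (AVX512DQ), 1073741824 == 2^30 (AVX512BW), and 2147483648 == 2^31 (AVX512VL).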
val va_code_Cpuid_Avx512 : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Avx512 : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Avx512 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Avx512 ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536 > 0 == avx512f_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 == avx512dq_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 == avx512bw_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 == avx512vl_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Avx512 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 7 /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall
(va_x_rax:nat64) (va_x_rbx:nat64) (va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64
rRdx va_x_rdx (va_upd_reg64 rRcx va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax
va_x_rax va_s0))) in va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 65536
> 0 == avx512f_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 131072 > 0 ==
avx512dq_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 1073741824 > 0 ==
avx512bw_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRbx va_sM) 2147483648 > 0 ==
avx512vl_enabled ==> va_k va_sM (())))
val va_wpProof_Cpuid_Avx512 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Avx512 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Avx512 () : (va_quickCode unit (va_code_Cpuid_Avx512 ())) =
(va_QProc (va_code_Cpuid_Avx512 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Avx512 va_wpProof_Cpuid_Avx512)
//--
//-- Cpuid_Osxsave
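// Note: 134217728 == 2^27, the OSXSAVE bit of CPUID.(EAX=1):ECX.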
val va_code_Cpuid_Osxsave : va_dummy:unit -> Tot va_code
val va_codegen_success_Cpuid_Osxsave : va_dummy:unit -> Tot va_pbool
val va_lemma_Cpuid_Osxsave : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Cpuid_Osxsave ()) va_s0 /\ va_get_ok va_s0 /\
va_get_reg64 rRax va_s0 = 1))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRcx va_sM (va_update_reg64 rRbx
va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))))
[@ va_qattr]
let va_wp_Cpuid_Osxsave (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ va_get_reg64 rRax va_s0 = 1 /\ (forall (va_x_rax:nat64) (va_x_rbx:nat64)
(va_x_rcx:nat64) (va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRcx
va_x_rcx (va_upd_reg64 rRbx va_x_rbx (va_upd_reg64 rRax va_x_rax va_s0))) in va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRcx va_sM) 134217728 > 0 == osxsave_enabled ==> va_k
va_sM (())))
val va_wpProof_Cpuid_Osxsave : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Cpuid_Osxsave va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRcx; va_Mod_reg64 rRbx; va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Cpuid_Osxsave () : (va_quickCode unit (va_code_Cpuid_Osxsave ())) =
(va_QProc (va_code_Cpuid_Osxsave ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRcx; va_Mod_reg64 rRbx;
va_Mod_reg64 rRax]) va_wp_Cpuid_Osxsave va_wpProof_Cpuid_Osxsave)
//--
//-- Xgetbv_Avx
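// Note: XGETBV with rRcx = 0 reads XCR0; 2 == bit 1 (SSE state) and
// 4 == bit 2 (AVX state).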
val va_code_Xgetbv_Avx : va_dummy:unit -> Tot va_code
val va_codegen_success_Xgetbv_Avx : va_dummy:unit -> Tot va_pbool
val va_lemma_Xgetbv_Avx : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xgetbv_Avx ()) va_s0 /\ va_get_ok va_s0 /\
osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 2 > 0 == sse_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 4 > 0 == avx_xcr0_enabled /\ va_state_eq va_sM
(va_update_reg64 rRdx va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM va_s0)))))
[@ va_qattr]
let va_wp_Xgetbv_Avx (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall (va_x_rax:nat64)
(va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax va_x_rax va_s0) in
va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 2 > 0 == sse_xcr0_enabled
/\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 4 > 0 == avx_xcr0_enabled ==> va_k va_sM
(())))
val va_wpProof_Xgetbv_Avx : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xgetbv_Avx va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xgetbv_Avx ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xgetbv_Avx () : (va_quickCode unit (va_code_Xgetbv_Avx ())) =
(va_QProc (va_code_Xgetbv_Avx ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax]) va_wp_Xgetbv_Avx
va_wpProof_Xgetbv_Avx)
//--
//-- Xgetbv_Avx512
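// Note: in XCR0, 32 == bit 5 (opmask), 64 == bit 6 (ZMM_Hi256), and
// 128 == bit 7 (Hi16_ZMM) -- the three AVX-512 state components.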
val va_code_Xgetbv_Avx512 : va_dummy:unit -> Tot va_code
val va_codegen_success_Xgetbv_Avx512 : va_dummy:unit -> Tot va_pbool
val va_lemma_Xgetbv_Avx512 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Xgetbv_Avx512 ()) va_s0 /\ va_get_ok va_s0 /\
osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 32 > 0 == opmask_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 64 > 0 == zmm_hi256_xcr0_enabled /\
Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 128 > 0 == hi16_zmm_xcr0_enabled /\
va_state_eq va_sM (va_update_reg64 rRdx va_sM (va_update_reg64 rRax va_sM (va_update_ok va_sM
va_s0)))))
[@ va_qattr]
let va_wp_Xgetbv_Avx512 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ osxsave_enabled /\ va_get_reg64 rRcx va_s0 = 0 /\ (forall (va_x_rax:nat64)
(va_x_rdx:nat64) . let va_sM = va_upd_reg64 rRdx va_x_rdx (va_upd_reg64 rRax va_x_rax va_s0) in
va_get_ok va_sM /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 32 > 0 ==
opmask_xcr0_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 64 > 0 ==
zmm_hi256_xcr0_enabled /\ Vale.Arch.Types.iand64 (va_get_reg64 rRax va_sM) 128 > 0 ==
hi16_zmm_xcr0_enabled ==> va_k va_sM (())))
val va_wpProof_Xgetbv_Avx512 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Xgetbv_Avx512 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Xgetbv_Avx512 ()) ([va_Mod_reg64 rRdx;
va_Mod_reg64 rRax]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Xgetbv_Avx512 () : (va_quickCode unit (va_code_Xgetbv_Avx512 ())) =
(va_QProc (va_code_Xgetbv_Avx512 ()) ([va_Mod_reg64 rRdx; va_Mod_reg64 rRax]) va_wp_Xgetbv_Avx512
va_wpProof_Xgetbv_Avx512)
//--
//-- Nat64Equal
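// Note: 18446744073709551615 == 2^64 - 1, so dst is set to 0 exactly when
// src holds the all-ones 64-bit value, and to 1 otherwise.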
val va_code_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> Tot va_code
val va_codegen_success_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> Tot
va_pbool
val va_lemma_Nat64Equal : va_b0:va_code -> va_s0:va_state -> dst:va_operand_reg_opr64 ->
src:va_operand_reg_opr64
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Nat64Equal dst src) va_s0 /\ va_is_dst_reg_opr64 dst
va_s0 /\ va_is_dst_reg_opr64 src va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\ (if
(va_eval_reg_opr64 va_s0 src = 18446744073709551615) then (va_eval_reg_opr64 va_sM dst = 0)
else (va_eval_reg_opr64 va_sM dst = 1)) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_ok va_sM (va_update_operand_reg_opr64 src va_sM (va_update_operand_reg_opr64 dst
va_sM va_s0))))))
[@ va_qattr]
let va_wp_Nat64Equal (dst:va_operand_reg_opr64) (src:va_operand_reg_opr64) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_is_dst_reg_opr64 dst va_s0 /\ va_is_dst_reg_opr64 src va_s0 /\ va_get_ok va_s0 /\ (forall
(va_x_dst:va_value_reg_opr64) (va_x_src:va_value_reg_opr64) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_operand_reg_opr64 src va_x_src (va_upd_operand_reg_opr64
dst va_x_dst va_s0)) in va_get_ok va_sM /\ va_if (va_eval_reg_opr64 va_s0 src =
18446744073709551615) (fun _ -> va_eval_reg_opr64 va_sM dst = 0) (fun _ -> va_eval_reg_opr64
va_sM dst = 1) ==> va_k va_sM (())))
val va_wpProof_Nat64Equal : dst:va_operand_reg_opr64 -> src:va_operand_reg_opr64 -> va_s0:va_state
-> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Nat64Equal dst src va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Nat64Equal dst src) ([va_Mod_flags;
va_mod_reg_opr64 src; va_mod_reg_opr64 dst]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Nat64Equal (dst:va_operand_reg_opr64) (src:va_operand_reg_opr64) : (va_quickCode unit
(va_code_Nat64Equal dst src)) =
(va_QProc (va_code_Nat64Equal dst src) ([va_Mod_flags; va_mod_reg_opr64 src; va_mod_reg_opr64
dst]) (va_wp_Nat64Equal dst src) (va_wpProof_Nat64Equal dst src))
//--
//-- Comment
val va_code_Comment : c:string -> Tot va_code
val va_codegen_success_Comment : c:string -> Tot va_pbool
val va_lemma_Comment : va_b0:va_code -> va_s0:va_state -> c:string
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Comment c) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_Comment (c:string) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_Comment : c:string -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Comment c va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Comment c) ([]) va_s0 va_k ((va_sM,
va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_Comment (c:string) : (va_quickCode unit (va_code_Comment c)) =
(va_QProc (va_code_Comment c) ([]) (va_wp_Comment c) (va_wpProof_Comment c))
//--
//-- LargeComment
val va_code_LargeComment : c:string -> Tot va_code
val va_codegen_success_LargeComment : c:string -> Tot va_pbool
val va_lemma_LargeComment : va_b0:va_code -> va_s0:va_state -> c:string
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_LargeComment c) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_LargeComment (c:string) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_LargeComment : c:string -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_LargeComment c va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_LargeComment c) ([]) va_s0 va_k
((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_LargeComment (c:string) : (va_quickCode unit (va_code_LargeComment c)) =
(va_QProc (va_code_LargeComment c) ([]) (va_wp_LargeComment c) (va_wpProof_LargeComment c))
//--
//-- NoNewline
val va_code_NoNewline : va_dummy:unit -> Tot va_code
val va_codegen_success_NoNewline : va_dummy:unit -> Tot va_pbool
val va_lemma_NoNewline : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_NoNewline ()) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_NoNewline (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_NoNewline : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_NoNewline va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_NoNewline ()) ([]) va_s0 va_k ((va_sM,
va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_NoNewline () : (va_quickCode unit (va_code_NoNewline ())) =
(va_QProc (va_code_NoNewline ()) ([]) va_wp_NoNewline va_wpProof_NoNewline)
//--
//-- Newline
val va_code_Newline : va_dummy:unit -> Tot va_code
val va_codegen_success_Newline : va_dummy:unit -> Tot va_pbool
val va_lemma_Newline : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Newline ()) va_s0 /\ va_get_ok va_s0))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_state_eq va_sM (va_update_ok va_sM va_s0)))
[@ va_qattr]
let va_wp_Newline (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let va_sM = va_s0 in va_get_ok va_sM ==> va_k va_sM (())))
val va_wpProof_Newline : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Newline va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Newline ()) ([]) va_s0 va_k ((va_sM,
va_f0, va_g))))
[@ "opaque_to_smt" va_qattr] | false | false | Vale.X64.InsBasic.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_Newline: Prims.unit -> (va_quickCode unit (va_code_Newline ())) | [] | Vale.X64.InsBasic.va_quick_Newline | {
"file_name": "obj/Vale.X64.InsBasic.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: Prims.unit -> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.X64.InsBasic.va_code_Newline ()) | {
"end_col": 71,
"end_line": 1293,
"start_col": 2,
"start_line": 1293
} |
Prims.Tot | val cs:S.ciphersuite | [
{
"abbrev": true,
"full_module": "Spec.Agile.Hash",
"short_module": "Hash"
},
{
"abbrev": true,
"full_module": "Spec.Agile.AEAD",
"short_module": "AEAD"
},
{
"abbrev": true,
"full_module": "Spec.Agile.DH",
"short_module": "DH"
},
{
"abbrev": true,
"full_module": "Spec.Agile.HPKE",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Hacl.Impl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let cs:S.ciphersuite = (DH.DH_Curve25519, Hash.SHA2_256, S.Seal AEAD.CHACHA20_POLY1305, Hash.SHA2_512) | val cs:S.ciphersuite
let cs:S.ciphersuite = | false | null | false | (DH.DH_Curve25519, Hash.SHA2_256, S.Seal AEAD.CHACHA20_POLY1305, Hash.SHA2_512) | {
"checked_file": "Hacl.HPKE.Curve51_CP128_SHA512.fsti.checked",
"dependencies": [
"Spec.Agile.HPKE.fsti.checked",
"Spec.Agile.Hash.fsti.checked",
"Spec.Agile.DH.fst.checked",
"Spec.Agile.AEAD.fsti.checked",
"prims.fst.checked",
"Hacl.Impl.HPKE.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.HPKE.Curve51_CP128_SHA512.fsti"
} | [
"total"
] | [
"FStar.Pervasives.Native.Mktuple4",
"Spec.Agile.DH.algorithm",
"Spec.Agile.HPKE.hash_algorithm",
"Spec.Agile.HPKE.aead",
"Spec.Hash.Definitions.hash_alg",
"Spec.Agile.DH.DH_Curve25519",
"Spec.Hash.Definitions.SHA2_256",
"Spec.Agile.HPKE.Seal",
"Spec.Agile.AEAD.CHACHA20_POLY1305",
"Spec.Hash.Definitions.SHA2_512"
] | [] | module Hacl.HPKE.Curve51_CP128_SHA512
open Hacl.Impl.HPKE
module S = Spec.Agile.HPKE
module DH = Spec.Agile.DH
module AEAD = Spec.Agile.AEAD
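(* Editorial gloss, not part of the original file: reading the ciphersuite
   tuple defined in this module against Spec.Agile.HPKE's 4-tuple layout,
   the components are, in order, the KEM's DH group (Curve25519), the KEM
   hash (SHA-256), the AEAD (ChaCha20-Poly1305), and the KDF hash (SHA-512). *)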
module Hash = Spec.Agile.Hash | false | true | Hacl.HPKE.Curve51_CP128_SHA512.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val cs:S.ciphersuite | [] | Hacl.HPKE.Curve51_CP128_SHA512.cs | {
"file_name": "code/hpke/Hacl.HPKE.Curve51_CP128_SHA512.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Spec.Agile.HPKE.ciphersuite | {
"end_col": 102,
"end_line": 10,
"start_col": 23,
"start_line": 10
} |
Prims.Tot | val trivial_pure_post (a: Type) : pure_post a | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let trivial_pure_post (a: Type) : pure_post a = fun _ -> True | val trivial_pure_post (a: Type) : pure_post a
let trivial_pure_post (a: Type) : pure_post a = | false | null | false | fun _ -> True | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.l_True",
"Prims.pure_post"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
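(* Editorial note, a minimal usage sketch (not part of the original file):
     val add_zero (x:nat) : Lemma (ensures x + 0 == x) [SMTPat (x + 0)]
   uses the [Lemma (ensures post) [SMTPat ...]] form listed above. *)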
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val trivial_pure_post (a: Type) : pure_post a | [] | FStar.Pervasives.trivial_pure_post | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> Prims.pure_post a | {
"end_col": 61,
"end_line": 132,
"start_col": 48,
"start_line": 132
} |
FStar.Pervasives.Lemma | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let reveal_opaque (s: string) = norm_spec [delta_only [s]] | let reveal_opaque (s: string) = | false | null | true | norm_spec [delta_only [s]] | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"lemma"
] | [
"Prims.string",
"FStar.Pervasives.norm_spec",
"Prims.Cons",
"FStar.Pervasives.norm_step",
"FStar.Pervasives.delta_only",
"Prims.Nil",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"FStar.Pervasives.norm",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e)
Removing ascriptions may improve performance,
since normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
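(* Editorial note (not part of the original file): e.g.,
   [assert_norm (pow2 8 == 256)] is discharged chiefly by the normalizer
   rather than by SMT reasoning about [pow2]. *)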
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val reveal_opaque : s: Prims.string -> x: _
-> FStar.Pervasives.Lemma (ensures FStar.Pervasives.norm [FStar.Pervasives.delta_only [s]] x == x) | [] | FStar.Pervasives.reveal_opaque | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | s: Prims.string -> x: _
-> FStar.Pervasives.Lemma (ensures FStar.Pervasives.norm [FStar.Pervasives.delta_only [s]] x == x) | {
"end_col": 58,
"end_line": 330,
"start_col": 32,
"start_line": 330
} |
|
Prims.Tot | val pure_return (a: Type) (x: a) : pure_wp a | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x | val pure_return (a: Type) (x: a) : pure_wp a
let pure_return (a: Type) (x: a) : pure_wp a = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_return0",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.logical",
"Prims.pure_wp_monotonic",
"Prims.pure_wp"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
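(* A minimal instance of the first form above (illustrative only):
   a [Lemma (ensures ...)] discharged automatically by the SMT solver. *)
let _add_zero (n:nat) : Lemma (ensures n + 0 == n) = ()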
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
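(* Usage sketch with a hypothetical opaque definition [_secret]; the
   [reveal_opaque] call re-exposes its body to the solver. *)
[@@ "opaque_to_smt"]
let _secret : int = 42
let _reveal_demo () : Lemma (_secret == 42) =
  reveal_opaque (`%_secret) _secret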
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pure_return (a: Type) (x: a) : pure_wp a | [] | FStar.Pervasives.pure_return | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> x: a -> Prims.pure_wp a | {
"end_col": 18,
"end_line": 338,
"start_col": 2,
"start_line": 337
} |
Prims.Tot | val pure_ite_wp (a: Type) (wp: pure_wp a) : Tot (pure_wp a) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp | val pure_ite_wp (a: Type) (wp: pure_wp a) : Tot (pure_wp a)
let pure_ite_wp (a: Type) (wp: pure_wp a) : Tot (pure_wp a) = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"Prims.pure_ite_wp0",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.logical",
"Prims.pure_wp_monotonic"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
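(* Sketch tying [zeta] and [delta_only] together: unrolling a concrete
   recursive call; [_fact] is an illustrative helper, not part of the file. *)
let rec _fact (n:nat) : nat = if n = 0 then 1 else n * _fact (n - 1)
let _ = assert (norm [delta_only [`%_fact]; zeta; iota; primops] (_fact 3) == 6)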
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
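(* Reading (illustrative): [pure_return a x] instantiates a postcondition
   at [x]; the wrapper only re-proves monotonicity of the underlying wp. *)
let _pure_return_demo () : Lemma (pure_return int 1 (fun r -> r == 1)) = ()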
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pure_ite_wp (a: Type) (wp: pure_wp a) : Tot (pure_wp a) | [] | FStar.Pervasives.pure_ite_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> wp: Prims.pure_wp a -> Prims.pure_wp a | {
"end_col": 19,
"end_line": 353,
"start_col": 2,
"start_line": 352
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0 | let st_bind_wp
(heap a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= | false | null | false | wp1 (fun a h1 -> wp2 a p h1) h0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"FStar.Pervasives.st_post_h",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
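(* Sketch: [Dv] imposes no termination check, so a manifestly
   non-terminating definition such as this one is accepted. *)
let rec _loop (x:nat) : Dv nat = _loop x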
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
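(* Reading (illustrative): [st_return] feeds the returned value to the
   postcondition and leaves the heap argument untouched. *)
let _st_return_demo (h0:nat) : Lemma (st_return nat bool true (fun b _ -> b == true) h0) = ()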
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_bind_wp : heap: Type ->
a: Type ->
b: Type ->
wp1: FStar.Pervasives.st_wp_h heap a ->
wp2: (_: a -> Prims.GTot (FStar.Pervasives.st_wp_h heap b)) ->
p: FStar.Pervasives.st_post_h heap b ->
h0: heap
-> Type0 | [] | FStar.Pervasives.st_bind_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
b: Type ->
wp1: FStar.Pervasives.st_wp_h heap a ->
wp2: (_: a -> Prims.GTot (FStar.Pervasives.st_wp_h heap b)) ->
p: FStar.Pervasives.st_post_h heap b ->
h0: heap
-> Type0 | {
"end_col": 38,
"end_line": 460,
"start_col": 7,
"start_line": 460
} |
|
Prims.Tot | val id (#a: Type) (x: a) : a | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let id (#a: Type) (x: a) : a = x | val id (#a: Type) (x: a) : a
let id (#a: Type) (x: a) : a = | false | null | false | x | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
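(* Hedged sketch: [assert_spinoff] behaves like [assert], but its proof
   obligation is dispatched in a separate SMT query. *)
let _spinoff_demo (x:nat) : unit = assert_spinoff (x + 0 == x)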
(** The polymorphic identity function *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val id (#a: Type) (x: a) : a | [] | FStar.Pervasives.id | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: a -> a | {
"end_col": 32,
"end_line": 128,
"start_col": 31,
"start_line": 128
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap) | let st_wp_h (heap a: Type) = | false | null | false | st_post_h heap a -> Tot (st_pre_h heap) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_post_h",
"FStar.Pervasives.st_pre_h"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
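(* A usage sketch with an illustrative definition: a definition tagged
   ["opaque_to_smt"] is hidden from the solver, and [reveal_opaque]
   re-introduces its defining equation exactly where it is needed. *)
[@@ "opaque_to_smt"]
let secret (x: int) : int = x + 1

let secret_spec (x: int) : Lemma (secret x == x + 1) =
  reveal_opaque (`%secret) (secret x)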
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
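(* A small sketch (the assertion is an illustrative addition): a
   [pure_wp] is a monotonic predicate transformer, so applying
   [pure_return a x] to a postcondition just instantiates it at [x]. *)
let example_pure_return () : unit = assert (pure_return int 42 (fun r -> r == 42))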
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
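(* Sketches with illustrative names: recursive definitions in [Div] and
   [Dv] are exempt from termination checking, in contrast to [Tot]. *)
let rec find_above (n x: int)
  : Div int (requires True) (ensures (fun r -> r >= x))
  = if n >= x then n else find_above (n + 1) x

(* A plainly divergent loop is still well-typed at [Dv]. *)
let rec loop_forever (u: unit) : Dv unit = loop_forever u

(* And, by the sub-effect above, any [PURE] computation can be used
   where a [Dv] computation is expected. *)
let pure_as_div () : Dv int = 1 + 1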
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
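(* A toy instantiation (illustrative names): taking the heap to be a
   single [nat]-valued counter, the generic signatures specialize to
   ordinary pre- and postconditions on that counter. *)
let counter_pre : st_pre_h nat = fun h -> h < 100
let counter_post : st_post_h' nat unit True = fun _ h1 -> h1 > 0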
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_wp_h : heap: Type -> a: Type -> Type | [] | FStar.Pervasives.st_wp_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> Type | {
"end_col": 68,
"end_line": 445,
"start_col": 29,
"start_line": 445
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_pre_h (heap: Type) = heap -> GTot Type0 | let st_pre_h (heap: Type) = | false | null | false | heap -> GTot Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
as the normalizer has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable. | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_pre_h : heap: Type -> Type | [] | FStar.Pervasives.st_pre_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> Type | {
"end_col": 46,
"end_line": 434,
"start_col": 28,
"start_line": 434
} |
|
Prims.Tot | val pure_bind_wp (a b: Type) (wp1: pure_wp a) (wp2: (a -> Tot (pure_wp b))) : Tot (pure_wp b) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2 | val pure_bind_wp (a b: Type) (wp1: pure_wp a) (wp2: (a -> Tot (pure_wp b))) : Tot (pure_wp b)
let pure_bind_wp (a b: Type) (wp1: pure_wp a) (wp2: (a -> Tot (pure_wp b))) : Tot (pure_wp b) = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"Prims.pure_bind_wp0",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.logical",
"Prims.pure_wp_monotonic"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
as the normalizer has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pure_bind_wp (a b: Type) (wp1: pure_wp a) (wp2: (a -> Tot (pure_wp b))) : Tot (pure_wp b) | [] | FStar.Pervasives.pure_bind_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> b: Type -> wp1: Prims.pure_wp a -> wp2: (_: a -> Prims.pure_wp b) -> Prims.pure_wp b | {
"end_col": 27,
"end_line": 343,
"start_col": 2,
"start_line": 342
} |
Prims.Tot | val pure_null_wp (a: Type) : Tot (pure_wp a) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a | val pure_null_wp (a: Type) : Tot (pure_wp a)
let pure_null_wp (a: Type) : Tot (pure_wp a) = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_null_wp0",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.logical",
"Prims.pure_wp_monotonic",
"Prims.pure_wp"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
as the normalizer has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pure_null_wp (a: Type) : Tot (pure_wp a) | [] | FStar.Pervasives.pure_null_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> Prims.pure_wp a | {
"end_col": 17,
"end_line": 363,
"start_col": 2,
"start_line": 362
} |
Prims.Tot | val pure_close_wp (a b: Type) (wp: (b -> Tot (pure_wp a))) : Tot (pure_wp a) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp | val pure_close_wp (a b: Type) (wp: (b -> Tot (pure_wp a))) : Tot (pure_wp a)
let pure_close_wp (a b: Type) (wp: (b -> Tot (pure_wp a))) : Tot (pure_wp a) = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"Prims.pure_close_wp0",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.logical",
"Prims.pure_wp_monotonic"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
as the normalizer has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pure_close_wp (a b: Type) (wp: (b -> Tot (pure_wp a))) : Tot (pure_wp a) | [] | FStar.Pervasives.pure_close_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> b: Type -> wp: (_: b -> Prims.pure_wp a) -> Prims.pure_wp a | {
"end_col": 23,
"end_line": 358,
"start_col": 2,
"start_line": 357
} |
Prims.GTot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x | let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = | false | null | false | p x | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"sometrivial"
] | [
"FStar.Pervasives.st_post_h",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
Unfold definitions marked with any of the listed qualifiers.
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
(** Like [delta_only], but unfolds every definition whose fully
    qualified name lies under one of the given namespaces, e.g.,
    [delta_namespace ["FStar.List"]] *)
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
since the normalizer has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
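(* Sketch: [Dv] accepts this evidently nonterminating definition, since
   [DIV] computations carry no termination obligation. *)
let rec _spin (u:unit) : Dv unit = _spin u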
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
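(* Illustrative sketch (hypothetical names): instantiating the template
   with a [nat]-valued heap; this WP demands a positive initial heap and
   passes the heap through unchanged. *)
let _pos_heap_wp : st_wp_h nat unit = fun post h0 -> h0 > 0 /\ post () h0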
(** Returning a value does not transform the state *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_return : heap: Type -> a: Type -> x: a -> p: FStar.Pervasives.st_post_h heap a -> _: heap{Prims.l_True}
-> Prims.GTot Type0 | [] | FStar.Pervasives.st_return | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> x: a -> p: FStar.Pervasives.st_post_h heap a -> _: heap{Prims.l_True}
-> Prims.GTot Type0 | {
"end_col": 63,
"end_line": 449,
"start_col": 60,
"start_line": 449
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h) | let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) = | false | null | false | (forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"Prims.l_Forall",
"FStar.Pervasives.st_post_h",
"Prims.l_imp",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
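(* Usage sketch: a [Lemma] in ensures form, tagged with an SMT pattern so
   the solver instantiates it whenever [x * 1] appears in a goal. *)
let _mul_one (x:int) : Lemma (ensures x * 1 == x) [SMTPat (x * 1)] = ()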
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
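(* Usage sketch: the proof of the asserted fact is dispatched to the SMT
   solver as its own query. *)
let _split_query () : unit = assert_spinoff (1 + 1 == 2)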
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
Unfold definitions marked with any of the listed qualifiers.
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
(** Like [delta_only], but unfolds every definition whose fully
    qualified name lies under one of the given namespaces, e.g.,
    [delta_namespace ["FStar.List"]] *)
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
since the normalizer has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
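(* Sketch (hypothetical name): the WP of returning [5] into a computation
   over a [nat]-valued heap; it just instantiates the postcondition. *)
let _return_five : st_wp_h nat int = st_return nat int 5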
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_stronger : heap: Type ->
a: Type ->
wp1: FStar.Pervasives.st_wp_h heap a ->
wp2: FStar.Pervasives.st_wp_h heap a
-> Prims.logical | [] | FStar.Pervasives.st_stronger | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
wp1: FStar.Pervasives.st_wp_h heap a ->
wp2: FStar.Pervasives.st_wp_h heap a
-> Prims.logical | {
"end_col": 63,
"end_line": 481,
"start_col": 2,
"start_line": 481
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_post_h (heap a: Type) = st_post_h' heap a True | let st_post_h (heap a: Type) = | false | null | false | st_post_h' heap a True | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_post_h'",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
Unfold definitions marked with any of the listed qualifiers.
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
(** Like [delta_only], but unfolds every definition whose fully
    qualified name lies under one of the given namespaces, e.g.,
    [delta_namespace ["FStar.List"]] *)
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
since the normalizer has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
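(* Sketch (hypothetical): a Hoare-style [Div] specification for a search
   that may diverge when no witness exists; no termination proof needed. *)
let rec _search (f:nat -> bool) (n:nat)
  : Div nat (requires True) (ensures (fun r -> f r == true))
  = if f n then n else _search f (n + 1)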
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
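(* Sketch: a precondition over a [nat]-valued heap, requiring the initial
   heap to be nonzero. *)
let _nonzero_pre : st_pre_h nat = fun h -> h > 0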
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0 | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_post_h : heap: Type -> a: Type -> Type | [] | FStar.Pervasives.st_post_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> Type | {
"end_col": 53,
"end_line": 442,
"start_col": 31,
"start_line": 442
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0 | let st_post_h' (heap a pre: Type) = | false | null | false | a -> _: heap{pre} -> GTot Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
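(* Usage sketch: materialize [ambient 0] as a hypothesis in the SMT
   context, even when [0] does not occur in the current goal. *)
let _ambient_zero : squash (ambient 0) = intro_ambient 0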
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking of terms,
and in some cases are necessary for doing so.
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
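(* Two hedged sketches of the normalization API; [fib] is a hypothetical
   function, not part of this interface. The first assertion has the
   normalizer compute [pow2 8] with no SMT arithmetic; the second relies
   on [zeta]-style unrolling of the recursion described earlier. *)
let _ = assert_norm (pow2 8 == 256)
let rec fib (n: nat) : Tot nat = if n < 2 then n else fib (n - 1) + fib (n - 2)
let _ = assert_norm (fib 5 == 5)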
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
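(* A hedged usage sketch; [secret] and [secret_is_42] are hypothetical
   names. ["opaque_to_smt"] hides the body from the solver, and
   [reveal_opaque] re-exposes it within a single proof. *)
[@@ "opaque_to_smt"]
let secret : int = 42
let secret_is_42 () : Lemma (secret == 42) =
  reveal_opaque (`%secret) secret  (* equates [secret] with its body *)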
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
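(* A hedged sketch of a possibly-divergent computation; [loop] is a
   hypothetical name. [Dv] demands no termination proof, so this
   definition is accepted even though it never returns. *)
let rec loop (x: int) : Dv int = loop x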
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_post_h' : heap: Type -> a: Type -> pre: Type -> Type | [] | FStar.Pervasives.st_post_h' | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> pre: Type -> Type | {
"end_col": 67,
"end_line": 439,
"start_col": 36,
"start_line": 439
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0) | let st_trivial (heap a: Type) (wp: st_wp_h heap a) = | false | null | false | (forall h0. wp (fun r h1 -> True) h0) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"Prims.l_Forall",
"Prims.l_True",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking of terms,
and in some cases are necessary for doing so.
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
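(* A hedged sketch instantiating the WP type at a toy [int]-valued heap;
   [incr_wp] is a hypothetical name. It specifies a computation that
   returns the initial state and increments the heap. *)
let incr_wp : st_wp_h int int = fun post h0 -> post h0 (h0 + 1)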
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
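(* A hedged sanity check, relying on the [unfold] qualifiers above:
   sequencing two [st_return]s with [st_bind_wp] demands exactly the
   postcondition at the final value. *)
let _ = assert (forall (p: st_post_h int int) (h0: int).
          st_bind_wp int int int (st_return int int 0)
            (fun x -> st_return int int (x + 1)) p h0 <==> p 1 h0)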
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_trivial : heap: Type -> a: Type -> wp: FStar.Pervasives.st_wp_h heap a -> Prims.logical | [] | FStar.Pervasives.st_trivial | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> wp: FStar.Pervasives.st_wp_h heap a -> Prims.logical | {
"end_col": 90,
"end_line": 490,
"start_col": 53,
"start_line": 490
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_wp (a: Type) = ex_post a -> GTot ex_pre | let ex_wp (a: Type) = | false | null | false | ex_post a -> GTot ex_pre | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_post",
"FStar.Pervasives.ex_pre"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
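(* [delta_namespace] carries no doc comment here. As we understand it,
   it unfolds every definition whose fully qualified name falls under one
   of the listed namespaces. A hedged, hypothetical sketch, using [abs]
   from Prims:
   [norm [delta_namespace ["Prims"]; zeta; iota; primops] (abs (-3))]
   should reduce to [3] by unfolding [abs] along the way. *)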
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking of terms,
and in some cases are necessary for doing so.
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
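(* A hedged sketch of instantiating the template, in the style of
   FStar.ST; [IntST] and the bare [int] heap are hypothetical. *)
new_effect IntST = STATE_h int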
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
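(* A hedged sketch of eliminating a [result]; [default_of] is a
   hypothetical helper that collapses both failure cases. *)
let default_of (#a: Type) (d: a) (r: result a) : a =
  match r with
  | V v -> v     (* normal result *)
  | E _ -> d     (* handleable exception: fall back to the default *)
  | Err _ -> d   (* fatal error: likewise *)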
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_wp : a: Type -> Type | [] | FStar.Pervasives.ex_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> Type | {
"end_col": 46,
"end_line": 531,
"start_col": 22,
"start_line": 531
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_post (a: Type) = ex_post' a True | let ex_post (a: Type) = | false | null | false | ex_post' a True | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_post'",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking of terms,
and in some cases are necessary for doing so.
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
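(* A hedged sketch of a partial-correctness spec in [Div]; [find_zero]
   is a hypothetical name. If [f] has no zero at or above [i] the search
   diverges, yet any value it does return satisfies the postcondition. *)
let rec find_zero (f: int -> Tot int) (i: int)
  : Div int (requires True) (ensures (fun r -> f r == 0))
  = if f i = 0 then i else find_zero f (i + 1)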
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
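(* Illustrative sketch, not part of the original interface: a total case
analysis on [result]; the name [_describe] is hypothetical. *)
let _describe (#a: Type) (r: result a) : string =
match r with
| V _ -> "a value"
| E _ -> "a handleable exception"
| Err m -> m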
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0 | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_post : a: Type -> Type | [] | FStar.Pervasives.ex_post | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> Type | {
"end_col": 39,
"end_line": 528,
"start_col": 24,
"start_line": 528
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post) | let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) = | false | null | false | wp_then post /\ (~p ==> wp_else post) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_wp",
"FStar.Pervasives.ex_post",
"Prims.l_and",
"Prims.l_imp",
"Prims.l_not",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
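(* Illustrative sketch, not part of the original interface: [norm_spec]
introduces the equation between a [norm] call and its argument.
The name [_norm_primops_eq] is hypothetical. *)
let _norm_primops_eq () : Lemma (norm [primops] (2 + 3) == 2 + 3) =
norm_spec [primops] (2 + 3)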
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
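(* Illustrative sketch, not part of the original interface: expose an
["opaque_to_smt"] definition to the solver, as described above.
The names [_secret] and [_reveal_secret] are hypothetical. *)
[@@ "opaque_to_smt"]
let _secret : int = 42
let _reveal_secret () : Lemma (_secret == 42) =
reveal_opaque (`%_secret) _secret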
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
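(* Illustrative sketch, not part of the original interface: a [Div] function
may loop forever, but any value it does return must satisfy the
postcondition. The name [_search] is hypothetical. *)
let rec _search (f: (nat -> bool)) (i: nat)
: Div nat (requires True) (ensures (fun r -> f r == true))
= if f i then i else _search f (i + 1)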
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
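(* Illustrative sketch, not part of the original interface: a concrete
[st_wp_h] value, the WP of reading a [nat]-valued state and returning it.
The name [_get_wp] is hypothetical. *)
let _get_wp : st_wp_h nat nat = fun post h0 -> post h0 h0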
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** WP-predicate transformers for exceptions *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
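(* Illustrative sketch, not part of the original interface: the WP of raising
an exception [e] hands the postcondition the [E e] result.
The name [_ex_raise_wp] is hypothetical. *)
let _ex_raise_wp (a: Type) (e: exn) : ex_wp a = fun p -> p (E e)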
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_if_then_else : a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.ex_wp a ->
wp_else: FStar.Pervasives.ex_wp a ->
post: FStar.Pervasives.ex_post a
-> Prims.logical | [] | FStar.Pervasives.ex_if_then_else | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.ex_wp a ->
wp_else: FStar.Pervasives.ex_wp a ->
post: FStar.Pervasives.ex_post a
-> Prims.logical | {
"end_col": 39,
"end_line": 553,
"start_col": 2,
"start_line": 553
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0 | let ex_post' (a pre: Type) = | false | null | false | _: result a {pre} -> GTot Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.result"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
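(* Illustrative sketch, not part of the original interface: a [Lemma] tagged
with an SMT pattern, so the solver applies it whenever [x + y] appears.
The name [_add_comm] is hypothetical. *)
let _add_comm (x y: int) : Lemma (x + y == y + x) [SMTPat (x + y)] = ()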
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
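(* Illustrative sketch, not part of the original interface: force a fact about
a nullary symbol into the ambient context.
The name [_ambient_zero] is hypothetical. *)
let _ambient_zero : squash (ambient 0) = intro_ambient 0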
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0 | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_post' : a: Type -> pre: Type -> Type | [] | FStar.Pervasives.ex_post' | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> pre: Type -> Type | {
"end_col": 60,
"end_line": 525,
"start_col": 29,
"start_line": 525
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0) | let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= | false | null | false | wp_then post h0 /\ (~p ==> wp_else post h0) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"FStar.Pervasives.st_post_h",
"Prims.l_and",
"Prims.l_imp",
"Prims.l_not",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
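(* Illustrative sketch, not part of the original interface: [norm] with
specific steps; no proof obligation arises since the result type is just
[int]. The name [_two] is hypothetical. *)
let _two : int = norm [delta_only [`%id]; primops] (id 2)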
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
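(* Illustrative sketch, not part of the original interface: a concrete
postcondition over an [int]-valued state, asserting the state did not
decrease. The name [_nondecreasing] is hypothetical. *)
let _nondecreasing (h0: int) : st_post_h int unit = fun _ h1 -> b2t (h1 >= h0)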
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_if_then_else : heap: Type ->
a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.st_wp_h heap a ->
wp_else: FStar.Pervasives.st_wp_h heap a ->
post: FStar.Pervasives.st_post_h heap a ->
h0: heap
-> Prims.logical | [] | FStar.Pervasives.st_if_then_else | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.st_wp_h heap a ->
wp_else: FStar.Pervasives.st_wp_h heap a ->
post: FStar.Pervasives.st_post_h heap a ->
h0: heap
-> Prims.logical | {
"end_col": 50,
"end_line": 469,
"start_col": 7,
"start_line": 469
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_pre = Type0 | let ex_pre = | false | null | false | Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
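(* Editorial sketch (not part of the original file): a [Lemma] tagged
with an SMT trigger, letting the solver instantiate it whenever it
sees a term of the form [x + y]; [add_comm] is a hypothetical name. *)
let add_comm (x y: int) : Lemma (x + y == y + x) [SMTPat (x + y)] = ()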
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Value of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
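(* Editorial sketch (not part of the original file): [assert_norm] can
discharge a closed fact by computation alone, with no SMT arithmetic. *)
let _ = assert_norm (pow2 8 = 256)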
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
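(* Editorial sketch (not part of the original file): a Hoare-style [Div]
signature. Termination of [collatz] is an open problem, so a partial
correctness spec is the natural fit; the name is hypothetical. *)
let rec collatz (n: pos) : Div nat (requires True) (ensures fun r -> r = 1) =
if n = 1 then 1
else if n % 2 = 0 then collatz (n / 2)
else collatz (3 * n + 1)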
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
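(* Editorial sketch (not part of the original file): instantiating the
template with a concrete state type, in the style of FStar.ST;
[NAT_STATE] is a hypothetical effect name. *)
new_effect NAT_STATE = STATE_h nat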
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_pre : Type | [] | FStar.Pervasives.ex_pre | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | Type | {
"end_col": 18,
"end_line": 522,
"start_col": 13,
"start_line": 522
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p) | let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = | false | null | false | (forall (p: ex_post a). wp1 p ==> wp2 p) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_wp",
"Prims.l_Forall",
"FStar.Pervasives.ex_post",
"Prims.l_imp",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
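(* Editorial sketch (not part of the original file): normalization also
unrolls closed recursive calls, so [assert_norm] can evaluate them away;
[fact] is a hypothetical name. *)
let rec fact (n: nat) : nat = if n = 0 then 1 else n * fact (n - 1)
let _ = assert_norm (fact 5 = 120)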
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
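(* Editorial sketch (not part of the original file): a concrete WP over
[nat]-valued heaps for "return the current state and increment it";
[incr_wp] is a hypothetical name. *)
let incr_wp : st_wp_h nat nat = fun post h0 -> post h0 (h0 + 1)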
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
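(* Editorial sketch (not part of the original file): simple case analysis
on [result], collapsing both failure forms; [result_to_option] is a
hypothetical name. *)
let result_to_option (#a: Type) (r: result a) : option a =
match r with
| V x -> Some x
| E _ -> None
| Err _ -> None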
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
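(* Editorial sketch (not part of the original file): the term-level
analogue of [ex_bind_wp]: sequencing runs the continuation only on
[V] and propagates [E]/[Err] unchanged; [result_bind] is a
hypothetical name. *)
let result_bind (#a #b: Type) (r: result a) (f: a -> result b) : result b =
match r with
| V x -> f x
| E e -> E e
| Err m -> Err m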
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_stronger : a: Type -> wp1: FStar.Pervasives.ex_wp a -> wp2: FStar.Pervasives.ex_wp a -> Prims.logical | [] | FStar.Pervasives.ex_stronger | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> wp1: FStar.Pervasives.ex_wp a -> wp2: FStar.Pervasives.ex_wp a -> Prims.logical | {
"end_col": 87,
"end_line": 563,
"start_col": 47,
"start_line": 563
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0 | let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) = | false | null | false | forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"FStar.Pervasives.st_post_h",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.guard_free",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicit introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are no mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_ite_wp : heap: Type ->
a: Type ->
wp: FStar.Pervasives.st_wp_h heap a ->
post: FStar.Pervasives.st_post_h heap a ->
h0: heap
-> Prims.logical | [] | FStar.Pervasives.st_ite_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
wp: FStar.Pervasives.st_wp_h heap a ->
post: FStar.Pervasives.st_post_h heap a ->
h0: heap
-> Prims.logical | {
"end_col": 93,
"end_line": 476,
"start_col": 2,
"start_line": 475
} |
|
Prims.GTot | val ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x) | val ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = | false | null | false | p (V x) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"sometrivial"
] | [
"FStar.Pervasives.ex_post",
"FStar.Pervasives.V"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
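(* Illustrative sketch (hypothetical example, not from the original file):
a lemma stated with the abbreviation above, in the
"Lemma (ensures post) [SMTPat ...]" form; the pattern lets the solver
instantiate the lemma whenever [x + y] appears in a goal. *)
let add_comm_example (x y: int)
  : Lemma (ensures x + y == y + x) [SMTPat (x + y)]
  = ()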
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
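(* Illustrative sketch (hypothetical example): the normalizer reduces the
equation first, so the SMT query that remains is trivial. *)
let _ = assert_norm (2 + 3 == 5)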
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
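(* Note (added): each wrapper below follows the same shape as
[pure_return] above: first reveal the monotonicity refinement of
[pure_wp] to the solver, then defer to the corresponding primitive
combinator ([pure_return0], [pure_bind_wp0], ...) from Prims. *)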
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
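(* Illustrative sketch (hypothetical example; [countdown] is a made-up
name): general recursion without a decreases measure is accepted in [Dv];
for negative inputs this function diverges, which the partial-correctness
reading of [DIV] permits. *)
let rec countdown (n: int) : Dv int =
  if n = 0 then 0 else countdown (n - 1)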
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for stateful computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 | [] | FStar.Pervasives.ex_return | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> x: a -> p: FStar.Pervasives.ex_post a -> Prims.GTot Type0 | {
"end_col": 68,
"end_line": 535,
"start_col": 61,
"start_line": 535
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_post_h (h a: Type) = all_post_h' h a True | let all_post_h (h a: Type) = | false | null | false | all_post_h' h a True | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_post_h'",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for stateful computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
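(* Illustrative sketch (hypothetical helper; [from_result] is a made-up
name): collapsing a [result] to a plain value by case analysis on the
three constructors. *)
let from_result (#a: Type) (def: a) (r: result a) : a =
  match r with
  | V x -> x
  | E _ -> def
  | Err _ -> def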
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
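(* Illustrative sketch (hypothetical example; [checked_div_wp] is a
made-up name): the WP of a computation that raises some exception [e]
when the divisor is 0 and otherwise returns the quotient. [ex_bind_wp]
above passes [E]- and [Err]-results straight to the continuation [k],
so an exception here short-circuits anything sequenced after it. *)
let checked_div_wp (e: exn) (n: int) : ex_wp int =
  fun post -> if n = 0 then post (E e) else post (V (10 / n))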
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
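(* Illustrative sketch (hypothetical example): a pure computation used at
the [Ex] type; the [DIV ~> EXN] lift above reinterprets its trivial
postcondition on [V]-results. *)
let double_in_ex (x: int) : Ex int = x + x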
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0 | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_post_h : h: Type -> a: Type -> Type | [] | FStar.Pervasives.all_post_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: Type -> a: Type -> Type | {
"end_col": 49,
"end_line": 623,
"start_col": 29,
"start_line": 623
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a)) | let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = | false | null | false | wp (fun a -> p (V a)) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"FStar.Pervasives.ex_post",
"Prims.l_True",
"FStar.Pervasives.V",
"Prims.pure_pre"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
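(* Illustrative sketch (hypothetical; [hidden] and [use_hidden] are
made-up names): the usage pattern described above, exposing an
["opaque_to_smt"] definition to the solver at a use site. *)
[@@ "opaque_to_smt"]
let hidden = 17
let use_hidden () : Lemma (hidden == 17) =
  reveal_opaque (`%hidden) hidden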
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for stateful computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lift_div_exn : a: Type -> wp: Prims.pure_wp a -> p: FStar.Pervasives.ex_post a -> Prims.pure_pre | [] | FStar.Pervasives.lift_div_exn | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> wp: Prims.pure_wp a -> p: FStar.Pervasives.ex_post a -> Prims.pure_pre | {
"end_col": 81,
"end_line": 595,
"start_col": 60,
"start_line": 595
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_pre_h (h: Type) = h -> GTot Type0 | let all_pre_h (h: Type) = | false | null | false | h -> GTot Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] for exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
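(** e.g. (a sketch, where [f] and [g] are hypothetical):
[[SMTPatOr [[SMTPat (f x)]; [SMTPat (g x)]]]] triggers the enclosing
lemma whenever either [f x] or [g x] appears in a goal. *)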
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
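(** As a small illustration (a sketch): a lemma installed as an SMT
rewrite hint via a pattern:
{[
let add_comm (x y: int) : Lemma (ensures x + y == y + x) [SMTPat (x + y)] = ()
]}
The pattern makes the solver instantiate the lemma whenever a term of
the shape [x + y] appears in a goal. *)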
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
Unfold definitions carrying any of the given qualifiers.
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
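(** Like [delta_only], but keyed by namespace rather than by exact name:
unfold any definition whose fully qualified name lies under one of the
given namespace prefixes, e.g., [delta_namespace ["FStar.List"]]. *)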
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, the normalizer drops the ascription
and returns just the result of (normalize e).
Removing ascriptions may improve performance,
as the normalizer has less work to do.
However, ascriptions help in re-typechecking terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
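(** For instance (a sketch): the normalizer folds constants at
type-checking time, so
{[
let three : int = norm [delta; primops] (1 + 2)
]}
reduces to [3]. *)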
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
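(** For example (a sketch; [pow2] is defined in [Prims]):
{[
let _ = assert_norm (pow2 8 == 256)
]}
Normalization computes [pow2 8] down to [256], leaving a trivial goal
for the solver. *)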
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial-correctness
interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
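(** For example (a sketch): a recursive function with no [decreases]
measure can still be accepted in [Dv], at the price of a partial
correctness reading only:
{[
let rec countdown (x: int) : Dv int = if x <= 0 then 0 else countdown (x - 1)
]}
*)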
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
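(** As a tiny illustration (a sketch, instantiating [heap] with [nat]):
the WP of a computation that returns the current state and increments
it is
{[
let incr_wp : st_wp_h nat nat = fun post h0 -> post h0 (h0 + 1)
]}
*)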
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
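(** Reading [st_bind_wp]: to run [wp1] followed by [wp2] against
postcondition [p] from initial state [h0], ask [wp1] to establish, for
its own result [a] and intermediate state [h1], the precondition
[wp2 a p h1] of the continuation. *)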
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented by [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
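(** A sketch of consuming a [result] by case analysis:
{[
let describe (r: result int) : string =
match r with
| V _ -> "value"
| E _ -> "exception"
| Err m -> m
]}
*)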
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
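(** Reading [ex_bind_wp]: a normal result [V ra1] flows into the second
computation's WP, while exceptions and fatal errors short-circuit,
passing directly to the (named) postcondition [k]. *)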
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
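(** For instance (a sketch, assuming the [raise] primitive provided by
[FStar.Exn] and a hypothetical exception [MyDivByZero]):
{[
exception MyDivByZero
let div_exn (x y: int) : Ex int = if y = 0 then raise MyDivByZero else x / y
]}
In the [else] branch the solver knows [y <> 0], so the division is
well-defined. *)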
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is. | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_pre_h : h: Type -> Type | [] | FStar.Pervasives.all_pre_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: Type -> Type | {
"end_col": 41,
"end_line": 617,
"start_col": 26,
"start_line": 617
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h) | let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) = | false | null | false | (forall (b: b). wp b p h) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.st_wp_h",
"FStar.Pervasives.st_post_h",
"Prims.l_Forall",
"Prims.logical"
] | [] | [file_context elided: verbatim duplicate of the FStar.Pervasives.fsti context already reproduced in full above] | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val st_close_wp : heap: Type ->
a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.st_wp_h heap a)) ->
p: FStar.Pervasives.st_post_h heap a ->
h: heap
-> Prims.logical | [] | FStar.Pervasives.st_close_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.st_wp_h heap a)) ->
p: FStar.Pervasives.st_post_h heap a ->
h: heap
-> Prims.logical | {
"end_col": 27,
"end_line": 486,
"start_col": 2,
"start_line": 486
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_trivial (heap a: Type) (wp: all_wp_h heap a) = (forall (h0: heap). wp (fun r h1 -> True) h0) | let all_trivial (heap a: Type) (wp: all_wp_h heap a) = | false | null | false | (forall (h0: heap). wp (fun r h1 -> True) h0) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_wp_h",
"Prims.l_Forall",
"FStar.Pervasives.result",
"Prims.l_True",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher univese also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** IN the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicit introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are no mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Value of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes the some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
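(* Illustration only: instantiating the template with a concrete state
type yields an actual computation type, in the style of FStar.ST; e.g.,
taking a single integer as the whole state: *)
new_effect STATE_int_example = STATE_h int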
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented by [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
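(* Hypothetical helper, for illustration only: collapse a [result],
forgetting which kind of failure occurred. *)
let result_to_option_example (#a: Type) (r: result a) : option a =
  match r with
  | V x -> Some x
  | E _ -> None
  | Err _ -> None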
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
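(* Note that in the [E] and [Err] branches above, the continuation [k]
is applied directly, without consulting [wp2]: an exceptional result of
the first computation short-circuits the second one. *)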
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
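(* Usage sketch only: assuming an [exception Example_failure] and a
[raise : exn -> Ex 'a] action (in the style of FStar.Exn, not provided
by this module), a dynamically guarded division in [Ex] could be
written as
[let ex_div (x y: int) : Ex int =
   if y = 0 then raise Example_failure else x / y] *)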
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [ALL_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
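(* Note that the [E] and [Err] branches above still pass the
intermediate heap [h1] to the postcondition: this is the layering of
state over exceptions described earlier, whereby raising an exception
preserves the state reached so far. *)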
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold
let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing a binder in the scope of an [ALL_h] wp *)
unfold
let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= (forall (b: b). wp b p h)
(** Applying an [ALL_h] wp to a trivial postcondition *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_trivial : heap: Type -> a: Type -> wp: FStar.Pervasives.all_wp_h heap a -> Prims.logical | [] | FStar.Pervasives.all_trivial | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> wp: FStar.Pervasives.all_wp_h heap a -> Prims.logical | {
"end_col": 100,
"end_line": 682,
"start_col": 55,
"start_line": 682
} |
|
Prims.Tot | val dfst (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot a | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let dfst (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b)
: Tot a
= Mkdtuple2?._1 t | val dfst (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot a
let dfst (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot a = | false | null | false | Mkdtuple2?._1 t | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.dtuple2",
"Prims.__proj__Mkdtuple2__item___1"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows assuming the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
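(* Illustration only, a hypothetical lemma that is not part of this
interface: a minimal use of the [Lemma (ensures ...) [SMTPat ...]]
form, provable automatically by the SMT solver. *)
let lemma_add_comm_example (x y: int)
  : Lemma (ensures x + y == y + x) [SMTPat (x + y)]
  = ()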
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e)
Removing ascriptions may improve performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
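(* Illustration only: the normalizer computes the product below, so the
SMT solver is left with a trivial goal. *)
let _ = assert_norm (7 * 191 == 1337)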
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [st_ite_wp] combinator names the postcondition
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented by [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [ALL_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold
let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing a binder in the scope of an [ALL_h] wp *)
unfold
let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= (forall (b: b). wp b p h)
(** Applying an [ALL_h] wp to a trivial postcondition *)
unfold
let all_trivial (heap a: Type) (wp: all_wp_h heap a) = (forall (h0: heap). wp (fun r h1 -> True) h0)
(** Introducing the [ALL_h] effect template *)
new_effect {
ALL_h (heap: Type) : a: Type -> wp: all_wp_h heap a -> Effect
with
return_wp = all_return heap
; bind_wp = all_bind_wp heap
; if_then_else = all_if_then_else heap
; ite_wp = all_ite_wp heap
; stronger = all_stronger heap
; close_wp = all_close_wp heap
; trivial = all_trivial heap
}
(**
Controlling inversions of inductive types
Given a value of an inductive type [v:t], where [t = A | B], the SMT
solver can only prove that [v=A \/ v=B] by _inverting_ [t]. This
inversion is controlled by the [ifuel] setting, which usually limits
the number and depth of such inversions that the solver
can perform.
The [inversion] predicate below is a way to circumvent the
[ifuel]-based restrictions on inversion depth. In particular, if
[inversion t] is available in the SMT solver's context, it is free to
invert [t] infinitely, regardless of the [ifuel] setting.
Be careful using this, since it explicitly subverts the [ifuel]
setting. If used unwisely, this can lead to very poor SMT solver
performance. *)
[@@ remove_unused_type_parameters [0]]
val inversion (a: Type) : Type0
(** To introduce [inversion t] in the SMT solver's context, call
[allow_inversion t]. *)
val allow_inversion (a: Type) : Pure unit (requires True) (ensures (fun x -> inversion a))
(** Since the [option] type is so common, we always allow inverting
options, regardless of [ifuel] *)
val invertOption (a: Type)
: Lemma (requires True) (ensures (forall (x: option a). None? x \/ Some? x)) [SMTPat (option a)]
(** Values of type [a] or type [b] *)
type either a b =
| Inl : v: a -> either a b
| Inr : v: b -> either a b
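(* Hypothetical helper, for illustration only: the eliminator for
[either], applying one function per case. *)
let from_either_example (#a #b #c: Type) (f: a -> c) (g: b -> c) (e: either a b) : c =
  match e with
  | Inl x -> f x
  | Inr y -> g y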
(** Projections for the components of a dependent pair *)
let dfst (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val dfst (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot a | [] | FStar.Pervasives.dfst | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | t: Prims.dtuple2 a b -> a | {
"end_col": 19,
"end_line": 734,
"start_col": 4,
"start_line": 734
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0) | let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= | false | null | false | wp_then post h0 /\ (~p ==> wp_else post h0) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_wp_h",
"FStar.Pervasives.all_post_h",
"Prims.l_and",
"Prims.l_imp",
"Prims.l_not",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows assuming the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off a separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e)
Removing ascriptions may improve performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [st_ite_wp] combinator names the postcondition
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented by [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [ALL_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_if_then_else : heap: Type ->
a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.all_wp_h heap a ->
wp_else: FStar.Pervasives.all_wp_h heap a ->
post: FStar.Pervasives.all_post_h heap a ->
h0: heap
-> Prims.logical | [] | FStar.Pervasives.all_if_then_else | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
p: Type0 ->
wp_then: FStar.Pervasives.all_wp_h heap a ->
wp_else: FStar.Pervasives.all_wp_h heap a ->
post: FStar.Pervasives.all_post_h heap a ->
h0: heap
-> Prims.logical | {
"end_col": 50,
"end_line": 658,
"start_col": 7,
"start_line": 658
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h) | let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) = | false | null | false | (forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_wp_h",
"Prims.l_Forall",
"FStar.Pervasives.all_post_h",
"Prims.l_imp",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
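(** For example, a hypothetical lemma (not part of this interface)
combining an [ensures] clause with an SMT trigger:
{[
let add_comm (x y: int) : Lemma (ensures x + y == y + x) [SMTPat (x + y)] = ()
]} *)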
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
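(** For instance, given a hypothetical definition
{[
[@@ "opaque_to_smt"]
let secret = 42
]}
[reveal_opaque (`%secret) secret] yields the equation
[norm [delta_only [`%secret]] secret == secret], letting the solver
unfold [secret] at that point ([secret] is an illustrative name). *)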
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
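(** For example, general recursion that cannot be shown terminating is
admitted in [Dv]; a hypothetical sketch:
{[
let rec loop (u: unit) : Dv unit = loop u
]} *)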
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
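(** As a hypothetical instance, taking [heap] to be [nat], the WP of a
computation that returns the current state and increments it:
{[
let incr_wp : st_wp_h nat nat = fun post h0 -> post h0 (h0 + 1)
]}
(an illustrative sketch; no such definition exists in this file). *)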
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
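(** For example, a hypothetical total handler collapsing a [result] to a
default value:
{[
let from_result (#a: Type) (def: a) (r: result a) : Tot a =
  match r with
  | V v -> v
  | E _ -> def
  | Err _ -> def
]} *)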
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_stronger : heap: Type ->
a: Type ->
wp1: FStar.Pervasives.all_wp_h heap a ->
wp2: FStar.Pervasives.all_wp_h heap a
-> Prims.logical | [] | FStar.Pervasives.all_stronger | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
wp1: FStar.Pervasives.all_wp_h heap a ->
wp2: FStar.Pervasives.all_wp_h heap a
-> Prims.logical | {
"end_col": 64,
"end_line": 669,
"start_col": 2,
"start_line": 669
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= (forall (b: b). wp b p h) | let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= | false | null | false | (forall (b: b). wp b p h) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_wp_h",
"FStar.Pervasives.all_post_h",
"Prims.l_Forall",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
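(** For example, a hypothetical unbounded search, accepted because [Div]
demands only partial correctness:
{[
let rec first_above (f: nat -> bool) (n: nat)
  : Div nat (requires True) (ensures fun r -> f r == true)
  = if f n then n else first_above f (n + 1)
]} *)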
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
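(** As a hypothetical instance, the WP of a computation that raises a
fixed exception [e0] (an illustrative sketch only):
{[
let raise_wp (#a: Type) (e0: exn) : ex_wp a = fun post -> post (E e0)
]} *)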
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
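(** As a hypothetical instance, taking [h] to be [nat], the WP of a
computation that returns the current state, cannot fail, and leaves the
state unchanged:
{[
let get_wp : all_wp_h nat nat = fun post h0 -> post (V h0) h0
]}
(an illustrative sketch; no such definition exists in this file). *)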
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold
let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing a binder in the scope of an [ALL_h] wp *)
unfold
let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_close_wp : heap: Type ->
a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.all_wp_h heap a)) ->
p: FStar.Pervasives.all_post_h heap a ->
h: heap
-> Prims.logical | [] | FStar.Pervasives.all_close_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.all_wp_h heap a)) ->
p: FStar.Pervasives.all_post_h heap a ->
h: heap
-> Prims.logical | {
"end_col": 32,
"end_line": 678,
"start_col": 7,
"start_line": 678
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True) | let ex_trivial (a: Type) (wp: ex_wp a) = | false | null | false | wp (fun r -> True) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_wp",
"FStar.Pervasives.result",
"Prims.l_True",
"FStar.Pervasives.ex_pre"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
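(** For example, a hypothetical lemma (not part of this interface) whose
trigger fires on either operand order:
{[
val add_comm (x y: int)
  : Lemma (x + y == y + x) [SMTPatOr [[SMTPat (x + y)]; [SMTPat (y + x)]]]
]} *)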
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
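(** For instance, a hypothetical use that moves one arithmetic fact into
its own SMT query:
{[
let _ = assert_spinoff (19 * 19 == 361)
]} *)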
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use this step with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
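(** For instance, a hypothetical use that unrolls a recursive function
at type-checking time:
{[
let _ = assert_norm (pow2 5 == 32)
]} *)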
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
    identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
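(** Editor-added sketch: [Dv] is what admits general recursion without
    a termination proof; e.g. the following loops forever yet
    typechecks:
    {[
      let rec forever () : Dv unit = forever ()
    ]} *)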
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
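(** Editor-added sketch (hypothetical, with [heap = int]): a WP for a
    "read the cell" action could be
    {[
      let read_wp : st_wp_h int int = fun post h0 -> post h0 h0
    ]}
    i.e., the postcondition must hold of the current state both as the
    result and as the (unchanged) final heap. *)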
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
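(** Editor's note: operationally, [st_bind_wp] runs [wp1] against a
    postcondition that feeds [wp1]'s result [a] and the intermediate
    heap [h1] into [wp2], so the heap is threaded h0 -> h1 -> final,
    left to right. *)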
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exception-raising computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
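(** Editor-added sketch: a total eliminator for [result], e.g.
    {[
      let default_of (#a: Type) (d: a) (r: result a) : a =
        match r with
        | V x -> x
        | E _ -> d
        | Err _ -> d
    ]} *)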
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_trivial : a: Type -> wp: FStar.Pervasives.ex_wp a -> FStar.Pervasives.ex_pre | [] | FStar.Pervasives.ex_trivial | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> wp: FStar.Pervasives.ex_wp a -> FStar.Pervasives.ex_pre | {
"end_col": 59,
"end_line": 571,
"start_col": 41,
"start_line": 571
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k | let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) = | false | null | false | forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_wp",
"FStar.Pervasives.ex_post",
"Prims.l_Forall",
"Prims.l_imp",
"FStar.Pervasives.result",
"Prims.guard_free",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
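(** Editor-added sketch of the concrete syntax above (names
    hypothetical):
    {[
      val append_length (#a: Type) (l1 l2: list a)
        : Lemma
          (ensures List.Tot.length (List.Tot.append l1 l2) ==
                   List.Tot.length l1 + List.Tot.length l2)
          [SMTPat (List.Tot.append l1 l2)]
    ]}
    which desugars to a [Pure] computation per the definition above. *)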
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
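(** Editor-added note: {[assert_spinoff p]} is used exactly like
    [assert p], but the obligation for [p] is sent to the solver as its
    own query, which can help when one conjunct of a large VC is the
    expensive part. *)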
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
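(** Editor-added sketch contrasting the two steps: for a hypothetical
    {[
      let rec fib (n: nat) : nat =
        if n < 2 then n else fib (n - 1) + fib (n - 2)
    ]}
    [norm [delta; zeta; iota; primops] (fib 10)] computes to [55]; on
    an open term like [fib (n + 1)], the recursive call under the
    blocked conditional stops unrolling with [zeta], whereas
    [zeta_full] would keep going. *)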
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the normalizer
has less work to do.
However, ascriptions help in re-typechecking terms, and in some
cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
    identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exception-raising computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_ite_wp : a: Type -> wp: FStar.Pervasives.ex_wp a -> post: FStar.Pervasives.ex_post a -> Prims.logical | [] | FStar.Pervasives.ex_ite_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> wp: FStar.Pervasives.ex_wp a -> post: FStar.Pervasives.ex_post a -> Prims.logical | {
"end_col": 85,
"end_line": 559,
"start_col": 2,
"start_line": 558
} |
|
Prims.GTot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x) | let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = | false | null | false | p (V x) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"sometrivial"
] | [
"FStar.Pervasives.all_post_h",
"FStar.Pervasives.V",
"Prims.l_True"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher univese also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** IN the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicit introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are no mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Value of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes the some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C)
normalize (e <: (t|C)) usually would normalize both the expression e
and the ascription
However, with unascribe step on, it will drop the ascription
and return the result of (normalize e),
Removing ascriptions may improve the performance,
as the normalization has less work to do
However, ascriptions help in re-typechecking of the terms,
and in some cases, are necessary for doing so
Use it with care
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for stateful computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effect. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_return : heap: Type -> a: Type -> x: a -> p: FStar.Pervasives.all_post_h heap a -> _: heap{Prims.l_True}
-> Prims.GTot Type0 | [] | FStar.Pervasives.all_return | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | heap: Type -> a: Type -> x: a -> p: FStar.Pervasives.all_post_h heap a -> _: heap{Prims.l_True}
-> Prims.GTot Type0 | {
"end_col": 69,
"end_line": 631,
"start_col": 62,
"start_line": 631
} |
|
Prims.Tot | val dsnd (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot (b (Mkdtuple2?._1 t)) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let dsnd (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b)
: Tot (b (Mkdtuple2?._1 t))
= Mkdtuple2?._2 t | val dsnd (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot (b (Mkdtuple2?._1 t))
let dsnd (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot (b (Mkdtuple2?._1 t)) = | false | null | false | Mkdtuple2?._2 t | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.dtuple2",
"Prims.__proj__Mkdtuple2__item___2",
"Prims.__proj__Mkdtuple2__item___1"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on an
expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the
expression e and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the
normalizer has less work to do.
However, ascriptions help in re-typechecking terms, and in some
cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
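(** Example (an illustrative sketch; [fact] is a hypothetical helper):
[assert_norm] discharges a fact by computation alone, which helps when
unrolling a recursive function would exceed the SMT fuel limit:
{[
let rec fact (n: nat) : nat = if n = 0 then 1 else n * fact (n - 1)
let _ = assert_norm (fact 5 == 120)
]} *)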
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
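(** Example (an illustrative sketch; the name [secret] is
hypothetical): exposing an ["opaque_to_smt"] definition at a single
proof site, the same idiom the [pure_*] wrappers below use for
[pure_wp_monotonic]:
{[
[@@ "opaque_to_smt"]
let secret (x: int) : int = x + 1
let secret_spec (x: int) : Lemma (secret x == x + 1) =
reveal_opaque (`%secret) secret
]} *)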
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
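(** Example (an illustrative sketch; [count_down] is hypothetical): a
function that diverges on negative inputs is rejected in [Tot] but
accepted in [Dv], where only partial correctness is checked:
{[
let rec count_down (x: int) : Dv int =
if x = 0 then 0 else count_down (x - 1)
]} *)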
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
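(** Example (an illustrative sketch; [recover] is hypothetical):
eliminating a [result] by case analysis, mapping both kinds of
failure to a default value:
{[
let recover (#a: Type) (d: a) (r: result a) : a =
match r with
| V v -> v
| E _ -> d
| Err _ -> d
]} *)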
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold
let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing a binder in the scope of an [ALL_h] wp *)
unfold
let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= (forall (b: b). wp b p h)
(** Applying an [ALL_h] wp to a trivial postcondition *)
unfold
let all_trivial (heap a: Type) (wp: all_wp_h heap a) = (forall (h0: heap). wp (fun r h1 -> True) h0)
(** Introducing the [ALL_h] effect template *)
new_effect {
ALL_h (heap: Type) : a: Type -> wp: all_wp_h heap a -> Effect
with
return_wp = all_return heap
; bind_wp = all_bind_wp heap
; if_then_else = all_if_then_else heap
; ite_wp = all_ite_wp heap
; stronger = all_stronger heap
; close_wp = all_close_wp heap
; trivial = all_trivial heap
}
(**
Controlling inversions of inductive types
Given a value of an inductive type [v:t], where [t = A | B], the SMT
solver can only prove that [v=A \/ v=B] by _inverting_ [t]. This
inversion is controlled by the [ifuel] setting, which limits how
many such inversions the solver can perform.
The [inversion] predicate below is a way to circumvent the
[ifuel]-based restrictions on inversion depth. In particular, if
[inversion t] is available in the SMT solver's context, the solver is
free to invert [t] arbitrarily deeply, regardless of the [ifuel] setting.
Be careful using this, since it explicitly subverts the [ifuel]
setting. If used unwisely, this can lead to very poor SMT solver
performance. *)
[@@ remove_unused_type_parameters [0]]
val inversion (a: Type) : Type0
(** To introduce [inversion t] in the SMT solver's context, call
[allow_inversion t]. *)
val allow_inversion (a: Type) : Pure unit (requires True) (ensures (fun x -> inversion a))
(** Since the [option] type is so common, we always allow inverting
options, regardless of [ifuel] *)
val invertOption (a: Type)
: Lemma (requires True) (ensures (forall (x: option a). None? x \/ Some? x)) [SMTPat (option a)]
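(** Example (an illustrative sketch; [t3] and [complete] are
hypothetical): with [allow_inversion] in scope, the solver can
case-split on an inductive even with [ifuel] set to 0:
{[
type t3 = | A | B | C
#push-options "--ifuel 0"
let complete (x: t3) : Lemma (A? x \/ B? x \/ C? x) = allow_inversion t3
#pop-options
]} *)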
(** Values of type [a] or type [b] *)
type either a b =
| Inl : v: a -> either a b
| Inr : v: b -> either a b
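(** Example (an illustrative sketch; [from_either] is hypothetical):
the canonical eliminator for [either]:
{[
let from_either (#a #b #c: Type) (f: a -> c) (g: b -> c) (e: either a b) : c =
match e with
| Inl x -> f x
| Inr y -> g y
]} *)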
(** Projections for the components of a dependent pair *)
let dfst (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b)
: Tot a
= Mkdtuple2?._1 t
let dsnd (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val dsnd (#a: Type) (#b: (a -> GTot Type)) (t: dtuple2 a b) : Tot (b (Mkdtuple2?._1 t)) | [] | FStar.Pervasives.dsnd | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | t: Prims.dtuple2 a b -> b (Mkdtuple2?._1 t) | {
"end_col": 19,
"end_line": 738,
"start_col": 4,
"start_line": 738
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h) | let all_wp_h (h a: Type) = | false | null | false | all_post_h h a -> Tot (all_pre_h h) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_post_h",
"FStar.Pervasives.all_pre_h"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher univese also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on an
expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the
expression e and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the
normalizer has less work to do.
However, ascriptions help in re-typechecking terms, and in some
cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
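(** Example (an illustrative sketch; [read_wp] is hypothetical): a WP
for reading the state, with the heap type instantiated to [nat]; the
postcondition is applied to the initial heap both as the result and
as the final state:
{[
let read_wp : st_wp_h nat nat = fun post h0 -> post h0 h0
]} *)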
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_wp_h : h: Type -> a: Type -> Type | [] | FStar.Pervasives.all_wp_h | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: Type -> a: Type -> Type | {
"end_col": 62,
"end_line": 626,
"start_col": 27,
"start_line": 626
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p) | let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = | false | null | false | (forall (b: b). wp b p) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.ex_wp",
"FStar.Pervasives.ex_post",
"Prims.l_Forall",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is important particularly for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns is laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher univese also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on an
expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the
expression e and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the
normalizer has less work to do.
However, ascriptions help in re-typechecking terms, and in some
cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
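(** Example (an illustrative sketch; [returns_zero_wp] is
hypothetical): a WP specifying a computation that must return
normally with the value 0:
{[
let returns_zero_wp : ex_wp int = fun post -> post (V 0)
]} *)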
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_close_wp : a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.ex_wp a)) ->
p: FStar.Pervasives.ex_post a
-> Prims.logical | [] | FStar.Pervasives.ex_close_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
a: Type ->
b: Type ->
wp: (_: b -> Prims.GTot (FStar.Pervasives.ex_wp a)) ->
p: FStar.Pervasives.ex_post a
-> Prims.logical | {
"end_col": 96,
"end_line": 567,
"start_col": 73,
"start_line": 567
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0 | let all_post_h' (h a pre: Type) = | false | null | false | result a -> _: h{pre} -> GTot Type0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.result"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
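(* A minimal sketch of the forms listed above, not part of the original
   file: a lemma with a precondition, a postcondition, and an SMT pattern. *)
let add_pos (x y: int)
  : Lemma (requires x > 0 /\ y > 0) (ensures x + y > 1) [SMTPat (x + y)]
  = ()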
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
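(* Illustrative usage, not part of the original file: the normalizer
   computes [pow2 4] to [16], leaving a trivial SMT obligation. *)
let _ = assert_norm (pow2 4 == 16)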
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
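(* Illustrative sketch, not part of the original file: [Dv] admits general
   recursion, so this deliberately non-terminating loop still typechecks. *)
let rec forever (x: int) : Dv int = forever x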
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** WP predicate transformers for exceptional computations *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
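(* Illustrative sketch, not part of the original file: [Ex] has trivial
   pre- and postconditions, so partial operations can be guarded locally;
   the name [safe_div] is made up. *)
let safe_div (x y: int) : Ex int = if y = 0 then 0 else x / y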
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0 | false | true | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_post_h' : h: Type -> a: Type -> pre: Type -> Type | [] | FStar.Pervasives.all_post_h' | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: Type -> a: Type -> pre: Type -> Type | {
"end_col": 69,
"end_line": 620,
"start_col": 34,
"start_line": 620
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0 | let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) = | false | null | false | forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"FStar.Pervasives.all_wp_h",
"FStar.Pervasives.all_post_h",
"Prims.l_Forall",
"Prims.l_imp",
"FStar.Pervasives.result",
"Prims.guard_free",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
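(* Illustrative sketch, not part of the original file: under partial
   correctness, the recursive call's postcondition can be assumed without
   a termination measure; the name [count_down] is made up. *)
let rec count_down (x: int) : Div int (requires True) (ensures (fun r -> r == 0)) =
  if x <= 0 then 0 else count_down (x - 1)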
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented using [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** WP predicate transformers for exceptional computations *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
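(* Forward-looking sketch, not part of the original file: once [ALL_h] is
   declared below, FStar.All instantiates it with a concrete heap, just as
   FStar.ST does for [STATE_h]; [MyAll] is a made-up name:
   new_effect MyAll = ALL_h nat *)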
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming the postcondition for better sharing in [ALL_h] *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_ite_wp : heap: Type ->
a: Type ->
wp: FStar.Pervasives.all_wp_h heap a ->
post: FStar.Pervasives.all_post_h heap a ->
h0: heap
-> Prims.logical | [] | FStar.Pervasives.all_ite_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
wp: FStar.Pervasives.all_wp_h heap a ->
post: FStar.Pervasives.all_post_h heap a ->
h0: heap
-> Prims.logical | {
"end_col": 100,
"end_line": 664,
"start_col": 2,
"start_line": 663
} |
|
Prims.Tot | val div_hoare_to_wp (#a: Type) (#pre: pure_pre) (post: pure_post' a pre) : Tot (pure_wp a) | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a) | val div_hoare_to_wp (#a: Type) (#pre: pure_pre) (post: pure_post' a pre) : Tot (pure_wp a)
let div_hoare_to_wp (#a: Type) (#pre: pure_pre) (post: pure_post' a pre) : Tot (pure_wp a) = | false | null | false | reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p: pure_post a) -> pre /\ (forall a. post a ==> p a) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.pure_pre",
"Prims.pure_post'",
"Prims.pure_post",
"Prims.l_and",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.logical",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Prims.pure_wp'",
"Prims.pure_wp_monotonic",
"Prims.pure_wp"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
they should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
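(* Illustrative sketch, not part of the original file: the shape of a
   disjunctive trigger using [SMTPatOr]; [f], [g], and the assumed lemma
   are placeholders for this example. *)
assume val f : int -> int
assume val g : int -> int
assume val f_or_g_pos (x: int)
  : Lemma (f x >= 0 \/ g x >= 0) [SMTPatOr [[SMTPat (f x)]; [SMTPat (g x)]]]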
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal for of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal for of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization.
In most cases you shouldn't need to use this step explicitly.
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation type annotation on
an expression, written as (e <: t) or (e <: C).
normalize (e <: (t|C)) would usually normalize both the expression e
and the ascription.
However, with the unascribe step on, it will drop the ascription
and return the result of (normalize e).
Removing ascriptions may improve performance,
as the normalization has less work to do.
However, ascriptions help in re-typechecking the terms,
and in some cases are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
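(** For example, a client can discharge a computational fact purely by
    normalization (illustrative):
    {[
      let _ = assert_norm (pow2 8 = 256)
    ]}
    The normalizer evaluates [pow2 8], leaving a trivial goal for the solver. *)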
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
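(** For example, given a hypothetical client definition
    {[
      [@@"opaque_to_smt"]
      let incr (x:int) : int = x + 1
    ]}
    a proof can expose its body with [reveal_opaque (`%incr) incr]. *)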
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val div_hoare_to_wp (#a: Type) (#pre: pure_pre) (post: pure_post' a pre) : Tot (pure_wp a) | [] | FStar.Pervasives.div_hoare_to_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | post: Prims.pure_post' a pre -> Prims.pure_wp a | {
"end_col": 58,
"end_line": 406,
"start_col": 2,
"start_line": 405
} |
Prims.GTot | val ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b) : GTot Type0 | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m))) | val ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b) : GTot Type0
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b) : GTot Type0 = | false | null | false | forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m))) | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"sometrivial"
] | [
"FStar.Pervasives.ex_wp",
"FStar.Pervasives.ex_post",
"Prims.l_Forall",
"Prims.l_imp",
"FStar.Pervasives.result",
"Prims.guard_free",
"Prims.l_True",
"Prims.exn",
"FStar.Pervasives.E",
"Prims.string",
"FStar.Pervasives.Err"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
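(** For instance, a client lemma with a quantifier trigger (an
    illustrative sketch):
    {[
      let add_comm (x y: int) : Lemma (x + y == y + x) [SMTPat (x + y)] = ()
    ]}
*)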
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the normalizer
has less work to do.
However, ascriptions help in re-typechecking terms, and in some cases
are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
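(** For example, general recursion without a termination proof is
    accepted in [Dv] (an illustrative sketch):
    {[
      let rec collatz (n:pos) : Dv pos =
        if n = 1 then 1
        else if n % 2 = 0 then collatz (n / 2)
        else collatz (3 * n + 1)
    ]}
*)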
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
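(** For instance, the WP of a hypothetical [get] action, which returns
    the current state without changing it, can be written directly
    against this type:
    {[
      let get_wp (heap: Type) : st_wp_h heap heap = fun p h -> p h h
    ]}
*)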
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
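(** A handler can case-analyse a [result] in the usual way (illustrative):
    {[
      let describe (#a: Type) (r: result a) : string =
        match r with
        | V _ -> "normal result"
        | E _ -> "exception"
        | Err m -> m
    ]}
*)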
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b) : GTot Type0 | [] | FStar.Pervasives.ex_bind_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
a: Type ->
b: Type ->
wp1: FStar.Pervasives.ex_wp a ->
wp2: (_: a -> Prims.GTot (FStar.Pervasives.ex_wp b)) ->
p: FStar.Pervasives.ex_post b
-> Prims.GTot Type0 | {
"end_col": 32,
"end_line": 547,
"start_col": 2,
"start_line": 542
} |
Prims.GTot | val all_bind_wp
(heap a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0 | val all_bind_wp
(heap a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0
let all_bind_wp
(heap a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 = | false | null | false | wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0 | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"sometrivial"
] | [
"FStar.Pervasives.all_wp_h",
"FStar.Pervasives.all_post_h",
"FStar.Pervasives.result",
"Prims.l_True",
"Prims.exn",
"FStar.Pervasives.E",
"Prims.string",
"FStar.Pervasives.Err"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the normalizer
has less work to do.
However, ascriptions help in re-typechecking terms, and in some cases
are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE; however, the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
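(** For instance, an interface could declare a hypothetical system call as
    {[
      val get_time_ms : unit -> EXT nat
    ]}
    leaving its behaviour unspecified beyond possible divergence. *)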
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE] the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for exceptional computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
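(** For example, assuming [raise : exn -> Ex 'a] from [FStar.Exn], a
    client can mix exceptions with ordinary code (an illustrative sketch):
    {[
      exception Div_by_zero
      let div_exn (x y: int) : Ex int =
        if y = 0 then raise Div_by_zero else x / y
    ]}
*)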
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and uses a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap) | false | false | FStar.Pervasives.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val all_bind_wp
(heap a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 | [] | FStar.Pervasives.all_bind_wp | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
heap: Type ->
a: Type ->
b: Type ->
wp1: FStar.Pervasives.all_wp_h heap a ->
wp2: (_: a -> Prims.GTot (FStar.Pervasives.all_wp_h heap b)) ->
p: FStar.Pervasives.all_post_h heap b ->
h0: heap
-> Prims.GTot Type0 | {
"end_col": 6,
"end_line": 649,
"start_col": 2,
"start_line": 644
} |
Prims.Tot | val coerce_eq: #a: Type -> #b: Type -> squash (a == b) -> x: a -> b | [
{
"abbrev": false,
"full_module": "FStar.Pervasives.Native",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let coerce_eq (#a:Type) (#b:Type) (_:squash (a == b)) (x:a) : b = x | val coerce_eq: #a: Type -> #b: Type -> squash (a == b) -> x: a -> b
let coerce_eq (#a: Type) (#b: Type) (_: squash (a == b)) (x: a) : b = | false | null | false | x | {
"checked_file": "FStar.Pervasives.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked"
],
"interface_file": false,
"source_file": "FStar.Pervasives.fsti"
} | [
"total"
] | [
"Prims.squash",
"Prims.eq2"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pervasives
(* This is a file from the core library, dependencies must be explicit *)
open Prims
include FStar.Pervasives.Native
/// This module is implicitly opened in the scope of all other
/// modules.
///
/// It provides several basic definitions in F* that are common to
/// most programs. Broadly, these include:
///
/// - Utility types and functions, like [id], [either], dependent
/// tuples, etc.
///
/// - Utility effect definitions, including [DIV] for divergence,
/// [EXN] of exceptions, [STATE_h] a template for state, and (the
/// poorly named) [ALL_h] which combines them all.
///
/// - Some utilities to control proofs, e.g., inversion of inductive
/// type definitions.
///
/// - Built-in attributes that can be used to decorate definitions and
/// trigger various kinds of special treatments for those
/// definitions.
(** [remove_unused_type_parameters]
This attribute is used to decorate signatures in interfaces for
type abbreviations, indicating that the 0-based positional
parameters are unused in the definition and should be eliminated
for extraction.
This is particularly important for use with F# extraction, since
F# does not accept type abbreviations with unused type parameters.
See tests/bug-reports/RemoveUnusedTyparsIFace.A.fsti
*)
val remove_unused_type_parameters : list int -> Tot unit
(** Values of type [pattern] are used to tag [Lemma]s with SMT
quantifier triggers *)
type pattern : Type0 = unit
(** The concrete syntax [SMTPat] desugars to [smt_pat] *)
val smt_pat (#a: Type) (x: a) : Tot pattern
(** The concrete syntax [SMTPatOr] desugars to [smt_pat_or]. This is
used to represent a disjunction of conjunctions of patterns.
Note, the typing discipline and syntax of patterns are laxer than
it should be. Patterns like [SMTPatOr [SMTPatOr [...]]] are
expressible, but unsupported by F*
TODO: We should tighten this up, perhaps just reusing the
attribute mechanism for patterns.
*)
val smt_pat_or (x: list (list pattern)) : Tot pattern
(** eqtype is defined in prims at universe 0
Although, usually, only universe 0 types have decidable equality,
sometimes it is possible to define a type in a higher universe also
with decidable equality (e.g., type t : Type u#1 = | Unit)
Further, sometimes, as in Lemma below, we need to use a
universe-polymorphic equality type (although it is only ever
instantiated with `unit`)
*)
type eqtype_u = a:Type{hasEq a}
(** [Lemma] is a very widely used effect abbreviation.
It stands for a unit-returning [Ghost] computation, whose main
value is its logical payload in proving an implication between its
pre- and postcondition.
[Lemma] is desugared specially. The valid forms are:
Lemma (ensures post)
Lemma post [SMTPat ...]
Lemma (ensures post) [SMTPat ...]
Lemma (ensures post) (decreases d)
Lemma (ensures post) (decreases d) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d)
Lemma (requires pre) (ensures post) [SMTPat ...]
Lemma (requires pre) (ensures post) (decreases d) [SMTPat ...]
and
Lemma post (== Lemma (ensures post))
the squash argument on the postcondition allows one to assume the
precondition for the *well-formedness* of the postcondition.
*)
effect Lemma (a: eqtype_u) (pre: Type) (post: (squash pre -> Type)) (pats: list pattern) =
Pure a pre (fun r -> post ())
(** In the default mode of operation, all proofs in a verification
condition are bundled into a single SMT query. Sub-terms marked
with the [spinoff] below are the exception: each of them is
spawned off into a separate SMT query *)
val spinoff (p: Type0) : Type0
val spinoff_equiv (p:Type0) : Lemma (p <==> spinoff p) [SMTPat (spinoff p)]
(** Logically equivalent to assert, but spins off separate query *)
val assert_spinoff (p: Type) : Pure unit (requires (spinoff (squash p))) (ensures (fun x -> p))
(** The polymorphic identity function *)
unfold
let id (#a: Type) (x: a) : a = x
(** Trivial postconditions for the [PURE] effect *)
unfold
let trivial_pure_post (a: Type) : pure_post a = fun _ -> True
(** Sometimes it is convenient to explicitly introduce nullary symbols
into the ambient context, so that SMT can appeal to their definitions
even when they are not mentioned explicitly in the program, e.g., when
needed for triggers.
Use [intro_ambient t] for that.
See, e.g., LowStar.Monotonic.Buffer.fst and its usage there for loc_none *)
[@@ remove_unused_type_parameters [0; 1;]]
val ambient (#a: Type) (x: a) : Type0
(** cf. [ambient], above *)
val intro_ambient (#a: Type) (x: a) : Tot (squash (ambient x))
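(** A typical use (illustrative) brings a client definition into the
    ambient context for the solver:
    {[
      let _ = intro_ambient my_constant
    ]}
    where [my_constant] is a hypothetical definition needed for triggers. *)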
/// Controlling normalization
(** In any invocation of the F* normalizer, every occurrence of
[normalize_term e] is reduced to the full normal form of [e]. *)
val normalize_term (#a: Type) (x: a) : Tot a
(** In any invocation of the F* normalizer, every occurrence of
[normalize e] is reduced to the full normal form of [e]. *)
val normalize (a: Type0) : Type0
(** Values of [norm_step] are used to enable specific normalization
steps, controlling how the normalizer reduces terms. *)
val norm_step : Type0
(** Logical simplification, e.g., [P /\ True ~> P] *)
val simplify : norm_step
(** Weak reduction: Do not reduce under binders *)
val weak : norm_step
(** Head normal form *)
val hnf : norm_step
(** Reduce primitive operators, e.g., [1 + 1 ~> 2] *)
val primops : norm_step
(** Unfold all non-recursive definitions *)
val delta : norm_step
(** Turn on debugging for this specific call. *)
val norm_debug : norm_step
(** Unroll recursive calls
Note: Since F*'s termination check is semantic rather than
syntactically structural, recursive calls in inconsistent contexts,
or recursive evaluation of open terms can diverge.
When asking for the [zeta] step, F* implements a heuristic to
disable [zeta] when reducing terms beneath a blocked match. This
helps prevent some trivial looping behavior. However, it also
means that with [zeta] alone, your term may not reduce as much as
you might want. See [zeta_full] for that.
*)
val zeta : norm_step
(** Unroll recursive calls
Unlike [zeta], [zeta_full] has no looping prevention
heuristics. F* will try to unroll recursive functions as much as
it can, potentially looping. Use with care.
Note, [zeta_full] implies [zeta].
See [tests/micro-benchmarks/ReduceRecUnderMatch.fst] for an example.
*)
val zeta_full : norm_step
(** Reduce case analysis (i.e., match) *)
val iota : norm_step
(** Use normalization-by-evaluation, instead of interpretation (experimental) *)
val nbe : norm_step
(** Reify effectful definitions into their representations *)
val reify_ : norm_step
(** Unlike [delta], unfold definitions for only the names in the given
list. Each string is a fully qualified name like [A.M.f] *)
val delta_only (s: list string) : Tot norm_step
(** Unfold definitions for only the names in the given list, but
unfold each definition encountered after unfolding as well.
For example, given
{[
let f0 = 0
let f1 = f0 + 1
]}
[norm [delta_only [`%f1]] f1] will reduce to [f0 + 1].
[norm [delta_fully [`%f1]] f1] will reduce to [0 + 1].
Each string is a fully qualified name like [A.M.f], typically
constructed using a quotation, as in the example above. *)
val delta_fully (s: list string) : Tot norm_step
(** Rather than mention a symbol to unfold by name, it can be
convenient to tag a collection of related symbols with a common
attribute and then to ask the normalizer to reduce them all.
For example, given:
{[
irreducible let my_attr = ()
[@@my_attr]
let f0 = 0
[@@my_attr]
let f1 = f0 + 1
]}
{[norm [delta_attr [`%my_attr]] f1]}
will reduce to [0 + 1].
*)
val delta_attr (s: list string) : Tot norm_step
(**
For example, given:
{[
unfold
let f0 = 0
inline_for_extraction
let f1 = f0 + 1
]}
{[norm [delta_qualifier ["unfold"; "inline_for_extraction"]] f1]}
will reduce to [0 + 1].
*)
val delta_qualifier (s: list string) : Tot norm_step
val delta_namespace (s: list string) : Tot norm_step
(**
This step removes some internal meta nodes during normalization
In most cases you shouldn't need to use this step explicitly
*)
val unmeta : norm_step
(**
This step removes ascriptions during normalization.
An ascription is a type or computation-type annotation on
an expression, written as (e <: t) or (e <: C).
Normalizing (e <: (t|C)) would usually normalize both the expression e
and the ascription.
With the unascribe step on, however, the normalizer drops the
ascription and returns just the result of (normalize e).
Removing ascriptions may improve performance, since the normalizer
has less work to do.
However, ascriptions help in re-typechecking terms, and in some cases
are necessary for doing so.
Use it with care.
*)
val unascribe : norm_step
(** [norm s e] requests normalization of [e] with the reduction steps
[s]. *)
val norm (s: list norm_step) (#a: Type) (x: a) : Tot a
(** [assert_norm p] reduces [p] as much as possible and then asks the
SMT solver to prove the reduct, concluding [p] *)
val assert_norm (p: Type) : Pure unit (requires (normalize p)) (ensures (fun _ -> p))
(** Sometimes it is convenient to introduce an equation between a term
and its normal form in the context. *)
val normalize_term_spec (#a: Type) (x: a) : Lemma (normalize_term #a x == x)
(** Like [normalize_term_spec], but specialized to [Type0] *)
val normalize_spec (a: Type0) : Lemma (normalize a == a)
(** Like [normalize_term_spec], but with specific normalization steps *)
val norm_spec (s: list norm_step) (#a: Type) (x: a) : Lemma (norm s #a x == x)
(** Use the following to expose an ["opaque_to_smt"] definition to the
solver as: [reveal_opaque (`%defn) defn] *)
let reveal_opaque (s: string) = norm_spec [delta_only [s]]
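(* Note: the [pure_*] wrappers just below use exactly this idiom,
revealing [pure_wp_monotonic] to the solver before delegating to the
underlying combinator. *)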
(** Wrappers over pure wp combinators that return a pure_wp type
(with monotonicity refinement) *)
unfold
let pure_return (a:Type) (x:a) : pure_wp a =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_return0 a x
unfold
let pure_bind_wp (a b:Type) (wp1:pure_wp a) (wp2:(a -> Tot (pure_wp b))) : Tot (pure_wp b) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_bind_wp0 a b wp1 wp2
unfold
let pure_if_then_else (a p:Type) (wp_then wp_else:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_if_then_else0 a p wp_then wp_else
unfold
let pure_ite_wp (a:Type) (wp:pure_wp a) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_ite_wp0 a wp
unfold
let pure_close_wp (a b:Type) (wp:b -> Tot (pure_wp a)) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_close_wp0 a b wp
unfold
let pure_null_wp (a:Type) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_null_wp0 a
[@@ "opaque_to_smt"]
unfold
let pure_assert_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assert_wp0 p
[@@ "opaque_to_smt"]
unfold
let pure_assume_wp (p:Type) : Tot (pure_wp unit) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
pure_assume_wp0 p
/// The [DIV] effect for divergent computations
///
/// The wp-calculus for [DIV] is the same as that of [PURE]
(** The effect of divergence: from a specificational perspective it is
identical to PURE, however the specs are given a partial
correctness interpretation. Computations with the [DIV] effect may
not terminate. *)
new_effect {
DIV : a:Type -> wp:pure_wp a -> Effect
with
return_wp = pure_return
; bind_wp = pure_bind_wp
; if_then_else = pure_if_then_else
; ite_wp = pure_ite_wp
; stronger = pure_stronger
; close_wp = pure_close_wp
; trivial = pure_trivial
}
(** [PURE] computations can be silently promoted for use in a [DIV] context *)
sub_effect PURE ~> DIV { lift_wp = purewp_id }
(** [Div] is the Hoare-style counterpart of the wp-indexed [DIV] *)
unfold
let div_hoare_to_wp (#a:Type) (#pre:pure_pre) (post:pure_post' a pre) : Tot (pure_wp a) =
reveal_opaque (`%pure_wp_monotonic) pure_wp_monotonic;
fun (p:pure_post a) -> pre /\ (forall a. post a ==> p a)
effect Div (a: Type) (pre: pure_pre) (post: pure_post' a pre) =
DIV a (div_hoare_to_wp post)
(** [Dv] is the instance of [DIV] with trivial pre- and postconditions *)
effect Dv (a: Type) = DIV a (pure_null_wp a)
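(* A minimal sketch of a computation that inhabits [Dv] but not [Tot]:
{[
let rec loop (u:unit) : Dv unit = loop u
]}
This is accepted because [Dv] carries only a partial-correctness
obligation. *)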
(** We use the [EXT] effect to underspecify external system calls
as being impure but having no observable effect on the state *)
effect EXT (a: Type) = Dv a
/// The [STATE_h] effect template for stateful computations, generic
/// in the type of the state.
///
/// Note, [STATE_h] is itself not a computation type in F*, since it
/// is parameterized by the type of heap. However, instantiations of
/// [STATE_h] with specific types of the heap are computation
/// types. See, e.g., [FStar.ST] for such instantiations.
///
/// Weakest preconditions for stateful computations transform
/// [st_post_h] postconditions to [st_pre_h] preconditions. Both are
/// parametric in the type of the state, here denoted by the
/// [heap:Type] variable.
(** Preconditions are predicates on the [heap] *)
let st_pre_h (heap: Type) = heap -> GTot Type0
(** Postconditions relate [a]-typed results to the final [heap], here
refined by some pure proposition [pre], typically instantiated to
the precondition applied to the initial [heap] *)
let st_post_h' (heap a pre: Type) = a -> _: heap{pre} -> GTot Type0
(** Postconditions without refinements *)
let st_post_h (heap a: Type) = st_post_h' heap a True
(** The type of the main WP-transformer for stateful computations *)
let st_wp_h (heap a: Type) = st_post_h heap a -> Tot (st_pre_h heap)
(** Returning a value does not transform the state *)
unfold
let st_return (heap a: Type) (x: a) (p: st_post_h heap a) = p x
(** Sequential composition of stateful WPs *)
unfold
let st_bind_wp
(heap: Type)
(a b: Type)
(wp1: st_wp_h heap a)
(wp2: (a -> GTot (st_wp_h heap b)))
(p: st_post_h heap b)
(h0: heap)
= wp1 (fun a h1 -> wp2 a p h1) h0
(** Branching for stateful WPs *)
unfold
let st_if_then_else
(heap a p: Type)
(wp_then wp_else: st_wp_h heap a)
(post: st_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** As with [PURE], the [wp] combinator names the postcondition as
[k] to avoid duplicating it. *)
unfold
let st_ite_wp (heap a: Type) (wp: st_wp_h heap a) (post: st_post_h heap a) (h0: heap) =
forall (k: st_post_h heap a).
(forall (x: a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption for stateful WPs *)
unfold
let st_stronger (heap a: Type) (wp1 wp2: st_wp_h heap a) =
(forall (p: st_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing the scope of a binder within a stateful WP *)
unfold
let st_close_wp (heap a b: Type) (wp: (b -> GTot (st_wp_h heap a))) (p: st_post_h heap a) (h: heap) =
(forall (b: b). wp b p h)
(** Applying a stateful WP to a trivial postcondition *)
unfold
let st_trivial (heap a: Type) (wp: st_wp_h heap a) = (forall h0. wp (fun r h1 -> True) h0)
(** Introducing a new effect template [STATE_h] *)
new_effect {
STATE_h (heap: Type) : result: Type -> wp: st_wp_h heap result -> Effect
with
return_wp = st_return heap
; bind_wp = st_bind_wp heap
; if_then_else = st_if_then_else heap
; ite_wp = st_ite_wp heap
; stronger = st_stronger heap
; close_wp = st_close_wp heap
; trivial = st_trivial heap
}
/// The [EXN] effect for computations that may raise exceptions or
/// fatal errors
///
/// Weakest preconditions for stateful computations transform
/// [ex_post] postconditions (predicates on [result]s) to [ex_pre]
/// precondition propositions.
(** Normal results are represented using [V x].
Handleable exceptions are represented as [E e].
Fatal errors are [Err msg]. *)
noeq
type result (a: Type) =
| V : v: a -> result a
| E : e: exn -> result a
| Err : msg: string -> result a
(** Exceptional preconditions are just propositions *)
let ex_pre = Type0
(** Postconditions on results refined by a precondition *)
let ex_post' (a pre: Type) = _: result a {pre} -> GTot Type0
(** Postconditions on results *)
let ex_post (a: Type) = ex_post' a True
(** Exceptions WP-predicate transformers *)
let ex_wp (a: Type) = ex_post a -> GTot ex_pre
(** Returning a value [x] normally promotes it to the [V x] result *)
unfold
let ex_return (a: Type) (x: a) (p: ex_post a) : GTot Type0 = p (V x)
(** Sequential composition of exception-raising code requires case analysing
the result of the first computation before "running" the second one *)
unfold
let ex_bind_wp (a b: Type) (wp1: ex_wp a) (wp2: (a -> GTot (ex_wp b))) (p: ex_post b)
: GTot Type0 =
forall (k: ex_post b).
(forall (rb: result b). {:pattern (guard_free (k rb))} p rb ==> k rb) ==>
(wp1 (function
| V ra1 -> wp2 ra1 k
| E e -> k (E e)
| Err m -> k (Err m)))
(** As for other effects, branching in [ex_wp] appears in two forms.
First, a simple case analysis on [p] *)
unfold
let ex_if_then_else (a p: Type) (wp_then wp_else: ex_wp a) (post: ex_post a) =
wp_then post /\ (~p ==> wp_else post)
(** Naming continuations for use with branching *)
unfold
let ex_ite_wp (a: Type) (wp: ex_wp a) (post: ex_post a) =
forall (k: ex_post a).
(forall (rb: result a). {:pattern (guard_free (k rb))} post rb ==> k rb) ==> wp k
(** Subsumption for exceptional WPs *)
unfold
let ex_stronger (a: Type) (wp1 wp2: ex_wp a) = (forall (p: ex_post a). wp1 p ==> wp2 p)
(** Closing the scope of a binder for exceptional WPs *)
unfold
let ex_close_wp (a b: Type) (wp: (b -> GTot (ex_wp a))) (p: ex_post a) = (forall (b: b). wp b p)
(** Applying a computation with a trivial postcondition *)
unfold
let ex_trivial (a: Type) (wp: ex_wp a) = wp (fun r -> True)
(** Introduce a new effect for [EXN] *)
new_effect {
EXN : result: Type -> wp: ex_wp result -> Effect
with
return_wp = ex_return
; bind_wp = ex_bind_wp
; if_then_else = ex_if_then_else
; ite_wp = ex_ite_wp
; stronger = ex_stronger
; close_wp = ex_close_wp
; trivial = ex_trivial
}
(** A Hoare-style abbreviation for EXN *)
effect Exn (a: Type) (pre: ex_pre) (post: ex_post' a pre) =
EXN a (fun (p: ex_post a) -> pre /\ (forall (r: result a). post r ==> p r))
(** We include divergence in exceptions.
NOTE: BE WARNED, CODE IN THE [EXN] EFFECT IS ONLY CHECKED FOR
PARTIAL CORRECTNESS *)
unfold
let lift_div_exn (a: Type) (wp: pure_wp a) (p: ex_post a) = wp (fun a -> p (V a))
sub_effect DIV ~> EXN { lift_wp = lift_div_exn }
(** A variant of [Exn] with trivial pre- and postconditions *)
effect Ex (a: Type) = Exn a True (fun v -> True)
/// The [ALL_h] effect template for computations that may diverge,
/// raise exceptions or fatal errors, and use a generic state.
///
/// Note, this effect is poorly named, particularly as F* has since
/// gained many more user-defined effects. We no longer have an effect
/// that includes all others.
///
/// We might rename this in the future to something like [StExnDiv_h].
///
/// We layer state on top of exceptions, meaning that raising an
/// exception does not discard the state.
///
/// As with [STATE_h], [ALL_h] is not a computation type, though its
/// instantiation with a specific type of [heap] (in FStar.All) is.
(** [all_pre_h] is a predicate on the initial state *)
let all_pre_h (h: Type) = h -> GTot Type0
(** Postconditions relate [result]s to final [heap]s refined by a precondition *)
let all_post_h' (h a pre: Type) = result a -> _: h{pre} -> GTot Type0
(** A variant of [all_post_h'] without the precondition refinement *)
let all_post_h (h a: Type) = all_post_h' h a True
(** WP predicate transformers for the [All_h] effect template *)
let all_wp_h (h a: Type) = all_post_h h a -> Tot (all_pre_h h)
(** Returning a value [x] normally promotes it to the [V x] result
without touching the [heap] *)
unfold
let all_return (heap a: Type) (x: a) (p: all_post_h heap a) = p (V x)
(** Sequential composition for [ALL_h] is like [EXN]: case analysis of
the exceptional result before "running" the continuation *)
unfold
let all_bind_wp
(heap: Type)
(a b: Type)
(wp1: all_wp_h heap a)
(wp2: (a -> GTot (all_wp_h heap b)))
(p: all_post_h heap b)
(h0: heap)
: GTot Type0 =
wp1 (fun ra h1 ->
(match ra with
| V v -> wp2 v p h1
| E e -> p (E e) h1
| Err msg -> p (Err msg) h1))
h0
(** Case analysis in [ALL_h] *)
unfold
let all_if_then_else
(heap a p: Type)
(wp_then wp_else: all_wp_h heap a)
(post: all_post_h heap a)
(h0: heap)
= wp_then post h0 /\ (~p ==> wp_else post h0)
(** Naming postcondition for better sharing in [ALL_h] *)
unfold
let all_ite_wp (heap a: Type) (wp: all_wp_h heap a) (post: all_post_h heap a) (h0: heap) =
forall (k: all_post_h heap a).
(forall (x: result a) (h: heap). {:pattern (guard_free (k x h))} post x h ==> k x h) ==> wp k h0
(** Subsumption in [ALL_h] *)
unfold
let all_stronger (heap a: Type) (wp1 wp2: all_wp_h heap a) =
(forall (p: all_post_h heap a) (h: heap). wp1 p h ==> wp2 p h)
(** Closing a binder in the scope of an [ALL_h] wp *)
unfold
let all_close_wp
(heap a b: Type)
(wp: (b -> GTot (all_wp_h heap a)))
(p: all_post_h heap a)
(h: heap)
= (forall (b: b). wp b p h)
(** Applying an [ALL_h] wp to a trivial postcondition *)
unfold
let all_trivial (heap a: Type) (wp: all_wp_h heap a) = (forall (h0: heap). wp (fun r h1 -> True) h0)
(** Introducing the [ALL_h] effect template *)
new_effect {
ALL_h (heap: Type) : a: Type -> wp: all_wp_h heap a -> Effect
with
return_wp = all_return heap
; bind_wp = all_bind_wp heap
; if_then_else = all_if_then_else heap
; ite_wp = all_ite_wp heap
; stronger = all_stronger heap
; close_wp = all_close_wp heap
; trivial = all_trivial heap
}
(**
Controlling inversions of inductive type
Given a value of an inductive type [v:t], where [t = A | B], the SMT
solver can only prove that [v=A \/ v=B] by _inverting_ [t]. This
inversion is controlled by the [ifuel] setting, which limits the
depth of such inversions that the solver can perform.
The [inversion] predicate below is a way to circumvent the
[ifuel]-based restrictions on inversion depth. In particular, if
[inversion t] is available in the SMT solver's context, the solver is free to
invert [t] infinitely, regardless of the [ifuel] setting.
Be careful using this, since it explicitly subverts the [ifuel]
setting. If used unwisely, this can lead to very poor SMT solver
performance. *)
[@@ remove_unused_type_parameters [0]]
val inversion (a: Type) : Type0
(** To introduce [inversion t] in the SMT solver's context, call
[allow_inversion t]. *)
val allow_inversion (a: Type) : Pure unit (requires True) (ensures (fun x -> inversion a))
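(* Usage sketch: with [type t = | A | B] and [v:t] in scope, calling
[allow_inversion t] inside a proof lets the solver conclude
[A? v \/ B? v] regardless of the [ifuel] setting. *)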
(** Since the [option] type is so common, we always allow inverting
options, regardless of [ifuel] *)
val invertOption (a: Type)
: Lemma (requires True) (ensures (forall (x: option a). None? x \/ Some? x)) [SMTPat (option a)]
(** Values of type [a] or type [b] *)
type either a b =
| Inl : v: a -> either a b
| Inr : v: b -> either a b
(** Projections for the components of a dependent pair *)
let dfst (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b)
: Tot a
= Mkdtuple2?._1 t
let dsnd (#a: Type) (#b: a -> GTot Type) (t: dtuple2 a b)
: Tot (b (Mkdtuple2?._1 t))
= Mkdtuple2?._2 t
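(* A quick sketch: for the dependent pair [p = (| 3, () |)] at type
[x:int & squash (x > 0)], [dfst p] is [3] and [dsnd p] is the proof
component; in general, [dsnd p] has type [b (dfst p)]. *)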
(** Dependent triples, with sugar [x:a & y:b x & c x y] *)
unopteq
type dtuple3 (a: Type) (b: (a -> GTot Type)) (c: (x: a -> b x -> GTot Type)) =
| Mkdtuple3 : _1: a -> _2: b _1 -> _3: c _1 _2 -> dtuple3 a b c
(** Dependent quadruples, with sugar [x:a & y:b x & z:c x y & d x y z] *)
unopteq
type dtuple4
(a: Type) (b: (x: a -> GTot Type)) (c: (x: a -> b x -> GTot Type))
(d: (x: a -> y: b x -> z: c x y -> GTot Type))
= | Mkdtuple4 : _1: a -> _2: b _1 -> _3: c _1 _2 -> _4: d _1 _2 _3 -> dtuple4 a b c d
(** Dependent quadruples, with sugar [x:a & y:b x & z:c x y & d x y z] *)
unopteq
type dtuple5
(a: Type) (b: (x: a -> GTot Type)) (c: (x: a -> b x -> GTot Type))
(d: (x: a -> y: b x -> z: c x y -> GTot Type))
(e: (x: a -> y: b x -> z: c x y -> w: d x y z -> GTot Type))
= | Mkdtuple5 : _1: a -> _2: b _1 -> _3: c _1 _2 -> _4: d _1 _2 _3 -> _5: e _1 _2 _3 _4 -> dtuple5 a b c d e
(** Explicitly discarding a value *)
let ignore (#a: Type) (x: a) : Tot unit = ()
(** In a context where [false] is provable, you can prove that any
type [a] is inhabited.
There are many proofs of this fact in F*. Here, in the implementation, we build an
infinitely looping function, since the termination check succeeds
in a [False] context. *)
val false_elim (#a: Type) (u: unit{False}) : Tot a
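(* Usage sketch: in a provably dead branch,
{[
match x with
| Some v -> v
| None -> false_elim () (* the context proves [Some? x], so [False] is derivable here *)
]}
[false_elim ()] produces a value of any required type. *)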
/// Attributes:
///
/// An attribute is any F* term.
///
/// Attributes are desugared and checked for being well-scoped. But,
/// they are not type-checked.
///
/// It is associated with a definition using the [[@@attribute]]
/// notation, just preceding the definition.
(** We collect several internal ocaml attributes into a single
inductive type.
This may be unnecessary. In the future, we are likely to flatten
this definition into several definitions of abstract top-level
names.
An example:
{[
[@@ CInline ] let f x = UInt32.(x +%^ 1)
]}
is extracted to C by KaRaMeL to a C definition tagged with the
[inline] qualifier. *)
type __internal_ocaml_attributes =
| PpxDerivingShow (* Generate [@@@ deriving show ] on the resulting OCaml type *)
| PpxDerivingShowConstant of string (* Similar, but for constant printers. *)
| PpxDerivingYoJson (* Generate [@@@ deriving yojson ] on the resulting OCaml type *)
| CInline
(* KaRaMeL-only: generates a C "inline" attribute on the resulting
* function declaration. *)
| Substitute
(* KaRaMeL-only: forces KaRaMeL to inline the function at call-site; this is
* deprecated and the recommended way is now to use F*'s
* [inline_for_extraction], which now also works for stateful functions. *)
| Gc
(* KaRaMeL-only: instructs KaRaMeL to heap-allocate any value of this
* data-type; this requires running with a conservative GC as the
* allocations are not freed. *)
| Comment of string
(* KaRaMeL-only: attach a comment to the declaration. Note that using F*-doc
* syntax automatically fills in this attribute. *)
| CPrologue of string
(* KaRaMeL-only: verbatim C code to be prepended to the declaration.
* Multiple attributes are valid and accumulate, separated by newlines. *)
| CEpilogue of string (* Ibid. *)
| CConst of string
(* KaRaMeL-only: indicates that the parameter with that name is to be marked
* as C const. This will be checked by the C compiler, not by KaRaMeL or F*.
*
* This is deprecated and doesn't work as intended. Use
* LowStar.ConstBuffer.fst instead! *)
| CCConv of string (* A calling convention for C, one of stdcall, cdecl, fastcall *)
| CAbstractStruct
(* KaRaMeL-only: for types that compile to struct types (records and
* inductives), indicate that the header file should only contain a forward
* declaration, which in turn forces the client to only ever use this type
* through a pointer. *)
| CIfDef (* KaRaMeL-only: on a given `val foo`, compile if foo with #ifdef. *)
| CMacro
(* KaRaMeL-only: for a top-level `let v = e`, compile as a macro *)
| CNoInline
(* For security-sensitive functions only: generate special attributes in C
to prevent inlining; if the function is subjected to a -static-header
option, the `inline` attribute will be removed, but the static will
remain. *)
(** The [inline_let] attribute on a local let-binding, instructs the
extraction pipeline to inline the definition. This may be both to
avoid generating unnecessary intermediate variables, and also to
enable further partial evaluation. Note, use this with care, since
inlining all lets can lead to an exponential blowup in code
size. *)
val inline_let : unit
(** The [rename_let] attribute supports a form of metaprogramming for
the names of let-bound variables used in extracted code.
This is useful, particularly in conjunction with partial
evaluation, to ensure that names reflect their usage context.
See tests/micro-benchmarks/Renaming*.fst *)
val rename_let (new_name: string) : Tot unit
(** The [plugin] attribute is used in conjunction with native
compilation of F* components, accelerating their reduction
relative to the default strategy of just interpreting them.
See examples/native_tactics for several examples. *)
val plugin (x: int) : Tot unit
(** An attribute to mark things that the typechecker should *first*
elaborate and typecheck, but unfold before verification. *)
val tcnorm : unit
(** We erase all ghost functions and unit-returning pure functions to
[()] at extraction. This creates a small issue with abstract
types. Consider a module that defines an abstract type [t] whose
(internal) definition is [unit] and also defines [f: int -> t]. [f]
would be erased to just [()] inside the module, while
client calls to [f] would not be, since [t] is abstract.
around this, when extracting interfaces, if we encounter an
abstract type, we tag it with this attribute, so that
extraction can treat it specially.
Note, since the use of cross-module inlining (the [--cmi] option),
this attribute is no longer necessary. We retain it for legacy,
but will remove it in the future. *)
val must_erase_for_extraction : unit
(** This attribute is used with the Dijkstra Monads for Free
construction to track position information in generated VCs *)
val dm4f_bind_range : unit
(** When attached to a top-level definition, the typechecker will succeed
if and only if checking the definition results in an error. The
error number list is actually OPTIONAL. If present, it will be
checked that the definition raises exactly those errors in the
specified multiplicity, but order does not matter. *)
val expect_failure (errs: list int) : Tot unit
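(* Usage sketch (19 is the usual assertion-failure error code):
{[
[@@expect_failure [19]]
let _ = assert False
]}
This declaration is accepted precisely because checking it fails with
that error. *)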
(** When --lax is present, we ignore the previous attribute, since some
definitions only fail when verification is turned on. With this
attribute, one can ensure that a definition fails while lax-checking
too. Same semantics as above, but lax mode will be turned on for the
definition. *)
val expect_lax_failure (errs: list int) : Tot unit
(** Print the time it took to typecheck a top-level definition *)
val tcdecltime : unit
(** This attribute is to be used as a hint for the unifier. A
function-typed symbol `t` marked with this attribute will be treated
as being injective in all its arguments by the unifier. That is,
given a problem `t a1..an =?= t b1..bn` the unifier will solve it by
proving `ai =?= bi` for all `i`, without trying to unfold the
definition of `t`. *)
val unifier_hint_injective : unit
(**
This attribute is used to control the evaluation order
and unfolding strategy for certain definitions.
In particular, given
{[
[@@(strict_on_arguments [1;2])]
let f x0 (x1:list x0) (x2:option x0) = e
]}
An application [f e0 e1 e2] is reduced by the normalizer by:
1. evaluating [e0 ~>* v0, e1 ~>* v1, e2 ~>* v2]
2 a.
If, according to the positional arguments [1;2],
v1 and v2 have constant head symbols
(e.g., v1 = Cons _ _ _, and v2 = None _)
then [f] is unfolded to [e] and reduced as
{[e[v0/x0][v1/x1][v2/x2]]}
2 b.
Otherwise, [f] is not unfolded and the term [f e0 e1 e2]
reduces to [f v0 v1 v2]. *)
val strict_on_arguments (x: list int) : Tot unit
(**
* An attribute to tag a tactic designated to solve any
* unsolved implicit arguments remaining at the end of type inference.
**)
val resolve_implicits : unit
(**
* Implicit arguments can be tagged with an attribute [abc] to dispatch
* their solving to a user-defined tactic that is also tagged with the
* same attribute as well as resolve_implicits, i.e., [@@abc; resolve_implicits].
* However, sometimes it is useful to have multiple such
* [abc]-tagged tactics in scope. In such a scenario, to choose among them,
* one can use the attribute as shown below to declare that [t] overrides
* all the tactics [t1...tn] and should be used to solve [abc]-tagged
* implicits, so long as [t] is not itself overridden by some other tactic.
[@@resolve_implicits; abc; override_resolve_implicits_handler abc [`%t1; ... `%tn]]
let t = e
**)
val override_resolve_implicits_handler : #a:Type -> a -> list string -> Tot unit
(** A tactic registered to solve implicits with the (handle_smt_goals)
attribute will receive the SMT goal generated during typechecking
just before it is passed to the SMT solver.
*)
val handle_smt_goals : unit
(** This attribute can be added to an inductive type definition,
indicating that it should be erased on extraction to `unit`.
However, any pattern matching on the inductive type results
in a `Ghost` effect, ensuring that computationally relevant
code cannot rely on the values of the erasable type.
See tests/micro-benchmarks/Erasable.fst, for examples. Also
see https://github.com/FStarLang/FStar/issues/1844 *)
val erasable : unit
(** [commute_nested_matches]
This attribute can be used to decorate an inductive type [t].
During normalization, if reduction is blocked on matching the
constructors of [t] in the following sense:
[
match (match e0 with | P1 -> e1 | ... | Pn -> en) with
| Q1 -> f1 ... | Qm -> fm
]
i.e., the outer match is stuck due to the inner match on [e0]
being stuck, and if the head constructors of the outer [Qi] patterns
are constructors of the decorated inductive type [t], then
this is reduced to
[
match e0 with
| P1 -> (match e1 with | Q1 -> f1 ... | Qm -> fm)
| ...
| Pn -> (match en with | Q1 -> f1 ... | Qm -> fm)
]
This is sometimes useful when partially evaluating code before
extraction, particularly when aiming to obtain first-order code
for KaRaMeL. However, this attribute should be used with care,
since if after the rewriting the inner matches do not reduce, then
this can cause an explosion in code size.
See tests/micro-benchmarks/CommuteNestedMatches.fst
and examples/layeredeffects/LowParseWriters.fsti
*)
val commute_nested_matches : unit
(** This attribute controls extraction: it can be used to disable
extraction of a given top-level definition into a specific backend,
such as "OCaml". If any extracted code must call into an erased
function, an error will be raised (code 340).
*)
val noextract_to (backend:string) : Tot unit
(** This attribute decorates a let binding, e.g.,
[@@normalize_for_extraction steps]
let f = e
The effect is that prior to extraction, F* will first reduce [e]
using the normalization [steps], and then proceed to extract it as
usual.
Almost the same behavior can be achieved by using a
[postprocess_for_extraction_with t] attribute, which runs tactic
[t] on the goal [e == ?u] and extracts the solution to [?u] in
place of [e]. However, using a tactic to postprocess a term is
more general than needed for some cases.
In particular, if we intend to only normalize [e] before
extraction (rather than applying some other form of equational
reasoning), then using [normalize_for_extraction] can be more
efficient, for the following reason:
Since we are reducing [e] just before extraction, F* can enable an
otherwise non-user-facing normalization feature that allows all
arguments marked [@@@erasable] to be erased to [()]---these terms
will anyway be extracted to [()] so erasing them during
normalization is a useful optimization.
*)
val normalize_for_extraction (steps:list norm_step) : Tot unit
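(* A hedged sketch (the [mk_table] and [table16] names are illustrative):
{[
[@@normalize_for_extraction [delta_only [`%mk_table]; primops]]
let table16 = mk_table 16
]}
asks F* to reduce [mk_table 16] with those steps before extracting the
result. *)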
(** A layered effect definition may optionally be annotated with the
(ite_soundness_by t) attribute, where t is another attribute.
When so, the implicits and the SMT guard generated when
checking the soundness of the if-then-else combinator are
dispatched to the tactic in scope that has the t attribute (in addition
to the resolve_implicits attribute as usual).
See examples/layeredeffects/IteSoundess.fst for a few examples.
*)
val ite_soundness_by (attribute: unit): Tot unit
(** By default, functions that have a layered effect need a type
annotation for their bodies.
However, a layered effect definition may contain the default_effect
attribute to tell the typechecker to use the default effect for
missing annotations.
The default effect attribute takes as argument a string, that is the name
of the default effect, two caveats:
- The argument must be a string constant (not a name, for example)
- The argument should be the fully qualified name
For example, the TAC effect in FStar.Tactics.Effect.fsti specifies
its default effect as FStar.Tactics.Tac
F* will typecheck that the default effect only takes one argument,
the result type of the computation
*)
val default_effect (s:string) : Tot unit
(** A layered effect may optionally be annotated with the
top_level_effect attribute to indicate that this effect may
appear at the top-level
(e.g., a top-level let x = e, where e has a layered effect type).
The top_level_effect attribute takes an (optional) string argument, namely the
name of an effect abbreviation that may constrain effect arguments
for the top-level effect.
As with default_effect, the string argument must be a string constant,
and fully qualified.
E.g. a Hoare-style effect `M a pre post` may have the attribute
`@@ top_level_effect "N"`, where the effect abbreviation `N` may be:
effect N a post = M a True post
i.e., enforcing a trivial precondition if `M` appears at the top-level
If the argument to `top_level_effect` is absent, then the effect itself
is allowed at the top-level with any effect arguments
See tests/micro-benchmarks/TopLevelIndexedEffects.fst for examples
*)
val top_level_effect (s:string) : Tot unit
(** This attribute can be annotated on the binders in an effect signature
to indicate that they are effect parameters. For example, for a
state effect that is parametric in the type of the state, the state
index may be marked as an effect parameter.
Also see https://github.com/FStarLang/FStar/wiki/Indexed-effects
*)
val effect_param : unit
(** Bind definition for a layered effect may optionally contain range
arguments, which are provided by the typechecker during reification.
This attribute on the effect definition indicates that the bind
has range arguments.
See for example the TAC effect in FStar.Tactics.Effect.fsti
*)
val bind_has_range_args : unit
(** An indexed effect definition may be annotated with
this attribute to indicate that the effect should be
extracted "natively". E.g., the `bind` of the effect is
extracted to primitive `let` bindings
As an example, `Steel` effects (the effect for concurrent
separation logic) are marked as such
*)
val primitive_extraction : unit
(** A qualifier on a type definition which when used in co-domain position
on an arrow type will be extracted as if it were an impure effect type.
e.g., if you have
[@@extract_as_impure_effect]
val stt (a:Type) (pre:_) (post:_) : Type
then arrows of the form `a -> stt b p q` will be extracted
similarly to `a -> Dv b`.
*)
val extract_as_impure_effect : unit
(** A binder in a definition/declaration may optionally be annotated as strictly_positive
When the let definition is used in a data constructor type in an inductive
definition, this annotation is used to check the positivity of the inductive
Further F* checks that the binder is actually positive in the let definition
See tests/micro-benchmarks/Positivity.fst and NegativeTests.Positivity.fst for a few examples
*)
val strictly_positive : unit
(** A binder in a definition/declaration may optionally be annotated as unused.
This is used in the strict positivity checker. E.g., a type such as the one
below is accepted
let f ([@@@unused] a:Type) = unit
type t = | MkT: f t -> t
F* checks that the binder is actually unused in the definition
See tests/micro-benchmarks/Positivity.fst for a few examples
*)
val unused : unit
(** This attribute may be added to an inductive type
to disable auto generated projectors
Normally there should not be any need to use this, unless
for some reason F* cannot typecheck the auto-generated projectors.
Another reason to use this attribute may be to avoid generating and
typechecking a lot of projectors, most of which are not going to be used
in the rest of the program
*)
val no_auto_projectors : unit
(** This attribute can be added to a let definition
and indicates to the typechecker to typecheck the signature of the definition
without using subtyping. This is sometimes useful for indicating that a lemma
can be applied by the tactic engine without requiring it to check additional
subtyping obligations
*)
val no_subtyping : unit
val admit_termination : unit
(** Pure and ghost inner let bindings are now always inlined during
the wp computation, if the return type is not unit and the head
symbol is not marked irreducible.
To circumvent this behavior, singleton can be used.
See the example usage in ulib/FStar.Algebra.Monoid.fst. *)
val singleton (#a: Type) (x: a) : Tot (y: a{y == x})
(** A weakening coercion from eqtype to Type.
One of its uses is in types of layered effect combinators that
are subject to a stricter typing discipline (no subtyping) *)
unfold let eqtype_as_type (a:eqtype) : Type = a
(** A coercion of the [x] from [a] to [b], when [a] is provably equal
to [b]. In most cases, F* will silently coerce from [a] to [b]
along a provable equality (as in the body of this function). *)
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val coerce_eq: #a: Type -> #b: Type -> squash (a == b) -> x: a -> b | [] | FStar.Pervasives.coerce_eq | {
"file_name": "ulib/FStar.Pervasives.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | _: Prims.squash (a == b) -> x: a -> b | {
"end_col": 67,
"end_line": 1210,
"start_col": 66,
"start_line": 1210
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "Lib.Buffer",
"short_module": "LB"
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let finish_st (a: keccak_alg) =
s:state (| a, () |) -> dst:B.buffer Lib.IntTypes.uint8 -> l:Lib.IntTypes.size_t {
B.length dst == (if is_shake a then Lib.IntTypes.(v (l <: size_t)) else Spec.Hash.Definitions.hash_length a)
} -> ST.Stack unit
(requires (fun h ->
B.live h s /\ B.live h dst /\ B.disjoint s dst))
(ensures (fun h0 _ h1 ->
B.(modifies (loc_buffer dst `loc_union` loc_buffer s) h0 h1) /\
Seq.equal (B.as_seq h1 dst) (Spec.Agile.Hash.finish a (as_seq h0 s) (v_len a l)))) | let finish_st (a: keccak_alg) = | false | null | false |
s: state (| a, () |) ->
dst: B.buffer Lib.IntTypes.uint8 ->
l:
Lib.IntTypes.size_t
{ B.length dst ==
(if is_shake a
then let open Lib.IntTypes in v (l <: size_t)
else Spec.Hash.Definitions.hash_length a) }
-> ST.Stack unit
(requires (fun h -> B.live h s /\ B.live h dst /\ B.disjoint s dst))
(ensures
(fun h0 _ h1 ->
B.(modifies ((loc_buffer dst) `loc_union` (loc_buffer s)) h0 h1) /\
Seq.equal (B.as_seq h1 dst) (Spec.Agile.Hash.finish a (as_seq h0 s) (v_len a l)))) | {
"checked_file": "Hacl.Hash.SHA3.fsti.checked",
"dependencies": [
"Spec.Hash.Definitions.fst.checked",
"Spec.Agile.Hash.fsti.checked",
"prims.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Hash.Definitions.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Hash.SHA3.fsti"
} | [
"total"
] | [
"Spec.Hash.Definitions.keccak_alg",
"Hacl.Hash.Definitions.state",
"Prims.Mkdtuple2",
"Spec.Hash.Definitions.hash_alg",
"Hacl.Hash.Definitions.m_spec",
"LowStar.Buffer.buffer",
"Lib.IntTypes.uint8",
"Lib.IntTypes.size_t",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"Lib.IntTypes.range",
"Lib.IntTypes.U32",
"LowStar.Monotonic.Buffer.length",
"LowStar.Buffer.trivial_preorder",
"Spec.Hash.Definitions.is_shake",
"Lib.IntTypes.v",
"Lib.IntTypes.PUB",
"Prims.bool",
"Spec.Hash.Definitions.hash_length",
"Prims.unit",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"LowStar.Monotonic.Buffer.live",
"Hacl.Hash.Definitions.impl_word",
"LowStar.Monotonic.Buffer.disjoint",
"LowStar.Monotonic.Buffer.modifies",
"LowStar.Monotonic.Buffer.loc_union",
"LowStar.Monotonic.Buffer.loc_buffer",
"FStar.Seq.Base.equal",
"LowStar.Monotonic.Buffer.as_seq",
"Spec.Agile.Hash.finish",
"Hacl.Hash.Definitions.as_seq",
"Hacl.Hash.SHA3.v_len"
] | [] | module Hacl.Hash.SHA3
// This module contains low-level implementations that implement the
// "incremental" API, found in Spec.Hash.Incremental.
//
// This is just a lightweight wrapper around the actual implementation in
// code/sha3; by re-exporting the correct type signatures, this makes
// instantiating the streaming functor trivial. (Also note that the
// implementation in code/sha3 does not export the "update_multi" version, since
// it does everything in one go, so it's convenient to make this explicit here.)
//
// NOTE: unlike other modules, this one is not entirely noextract
// inline_for_extraction. There are two reasons. First, because all Keccak
// variants share the same state type, these functions do *NOT* need to be
// inlined to fit in the Low* subset. Second, for this reason, they are not
// always reduced at compile-time for a chosen value of `a`, meaning that we
// need the code to look decent (and not have everything inlined aggressively
// when there are no opportunities for reduction).
open Spec.Hash.Definitions
open Hacl.Hash.Definitions
val block_len (a: keccak_alg): Lib.IntTypes.(n:size_t { v n = block_length a })
val hash_len (a: keccak_alg { not (is_shake a) }): Lib.IntTypes.(n:size_t { v n = hash_length a })
inline_for_extraction noextract
val init (a: keccak_alg): init_st (|a, ()|)
inline_for_extraction noextract
val update_multi (a: keccak_alg): update_multi_st (|a, ()|)
inline_for_extraction noextract
val update_last (a: keccak_alg): update_last_st (|a, ()|)
inline_for_extraction noextract
val finish (a: keccak_alg { not (is_shake a) }): finish_st (| a, ()|)
inline_for_extraction noextract
val hash (a: keccak_alg { not (is_shake a) }): hash_st a
/// A couple helpers specifically for the Keccak functor, which live here
/// because this module has an fsti and therefore can friend specs.
let v_len a (l: Lib.IntTypes.size_t): Spec.Hash.Definitions.output_length a =
let _ = allow_inversion hash_alg in
if is_shake a then
Lib.IntTypes.(v #U32 #PUB (l <: size_t))
else
()
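(* That is: for SHAKE algorithms the output length is the runtime
argument [l]; for fixed-output algorithms the output-length index is
just [()]. *)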
module B = LowStar.Buffer
module ST = FStar.HyperStack.ST
/// This is a variant of Hacl.Hash.SHA3.finish that takes an optional output
/// length. This is used in the Keccak streaming functor, wherein this signature
/// eventually becomes exposed in the C API. As a result, we cannot have `l` be
/// an indexed type (e.g. if is_shake then size_t else unit) because that would
/// not be extractable to C. So, we contend with a suboptimal contract, which
/// is: "unless a is a shake algorithm, the length is ignored".
noextract inline_for_extraction | false | true | Hacl.Hash.SHA3.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val finish_st : a: Spec.Hash.Definitions.keccak_alg -> Type0 | [] | Hacl.Hash.SHA3.finish_st | {
"file_name": "code/hash/Hacl.Hash.SHA3.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Spec.Hash.Definitions.keccak_alg -> Type0 | {
"end_col": 86,
"end_line": 69,
"start_col": 2,
"start_line": 62
} |
|
Prims.Tot | val v_len (a: _) (l: Lib.IntTypes.size_t) : Spec.Hash.Definitions.output_length a | [
{
"abbrev": true,
"full_module": "Lib.Buffer",
"short_module": "LB"
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Hash.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Hash",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let v_len a (l: Lib.IntTypes.size_t): Spec.Hash.Definitions.output_length a =
let _ = allow_inversion hash_alg in
if is_shake a then
Lib.IntTypes.(v #U32 #PUB (l <: size_t))
else
() | val v_len (a: _) (l: Lib.IntTypes.size_t) : Spec.Hash.Definitions.output_length a
let v_len a (l: Lib.IntTypes.size_t) : Spec.Hash.Definitions.output_length a = | false | null | false | let _ = allow_inversion hash_alg in
if is_shake a then let open Lib.IntTypes in v #U32 #PUB (l <: size_t) | {
"checked_file": "Hacl.Hash.SHA3.fsti.checked",
"dependencies": [
"Spec.Hash.Definitions.fst.checked",
"Spec.Agile.Hash.fsti.checked",
"prims.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Hash.Definitions.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Hash.SHA3.fsti"
} | [
"total"
] | [
"Spec.Hash.Definitions.hash_alg",
"Lib.IntTypes.size_t",
"Spec.Hash.Definitions.is_shake",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.bool",
"Spec.Hash.Definitions.output_length",
"Prims.unit",
"FStar.Pervasives.allow_inversion"
] | [] | module Hacl.Hash.SHA3
// This module contains low-level implementations that implement the
// "incremental" API, found in Spec.Hash.Incremental.
//
// This is just a lightweight wrapper around the actual implementation in
// code/sha3; by re-exporting the correct type signatures, this makes
// instantiating the streaming functor trivial. (Also note that the
// implementation in code/sha3 does not export the "update_multi" version, since
// it does everything in one go, so it's convenient to make this explicit here.)
//
// NOTE: unlike other modules, this one is not entirely noextract
// inline_for_extraction. There are two reasons. First, because all Keccak
// variants share the same state type, these functions do *NOT* need to be
// inlined to fit in the Low* subset. Second, for this reason, they are not
// always reduced at compile-time for a chosen value of `a`, meaning that we
// need the code to look decent (and not have everything inlined aggressively
// when there are no opportunities for reduction).
open Spec.Hash.Definitions
open Hacl.Hash.Definitions
val block_len (a: keccak_alg): Lib.IntTypes.(n:size_t { v n = block_length a })
val hash_len (a: keccak_alg { not (is_shake a) }): Lib.IntTypes.(n:size_t { v n = hash_length a })
inline_for_extraction noextract
val init (a: keccak_alg): init_st (|a, ()|)
inline_for_extraction noextract
val update_multi (a: keccak_alg): update_multi_st (|a, ()|)
inline_for_extraction noextract
val update_last (a: keccak_alg): update_last_st (|a, ()|)
inline_for_extraction noextract
val finish (a: keccak_alg { not (is_shake a) }): finish_st (| a, ()|)
inline_for_extraction noextract
val hash (a: keccak_alg { not (is_shake a) }): hash_st a
/// A couple helpers specifically for the Keccak functor, which live here
/// because this module has an fsti and therefore can friend specs. | false | false | Hacl.Hash.SHA3.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val v_len (a: _) (l: Lib.IntTypes.size_t) : Spec.Hash.Definitions.output_length a | [] | Hacl.Hash.SHA3.v_len | {
"file_name": "code/hash/Hacl.Hash.SHA3.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Spec.Hash.Definitions.hash_alg -> l: Lib.IntTypes.size_t -> Spec.Hash.Definitions.output_length a | {
"end_col": 6,
"end_line": 49,
"start_col": 77,
"start_line": 44
} |
Prims.Tot | val t_limbs:Hacl.Bignum.Definitions.limb_t | [
{
"abbrev": true,
"full_module": "Hacl.Bignum.MontArithmetic",
"short_module": "MA"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum.SafeAPI",
"short_module": "BS"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum",
"short_module": "BN"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let t_limbs: Hacl.Bignum.Definitions.limb_t = Lib.IntTypes.U32 | val t_limbs:Hacl.Bignum.Definitions.limb_t
let t_limbs:Hacl.Bignum.Definitions.limb_t = | false | null | false | Lib.IntTypes.U32 | {
"checked_file": "Hacl.Bignum4096_32.fsti.checked",
"dependencies": [
"prims.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Bignum.SafeAPI.fst.checked",
"Hacl.Bignum.MontArithmetic.fsti.checked",
"Hacl.Bignum.Definitions.fst.checked",
"Hacl.Bignum.Convert.fst.checked",
"Hacl.Bignum.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum4096_32.fsti"
} | [
"total"
] | [
"Lib.IntTypes.U32"
] | [] | module Hacl.Bignum4096_32
open FStar.Mul
module BN = Hacl.Bignum
module BS = Hacl.Bignum.SafeAPI
module MA = Hacl.Bignum.MontArithmetic
#set-options "--z3rlimit 50 --fuel 0 --ifuel 0" | false | true | Hacl.Bignum4096_32.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val t_limbs:Hacl.Bignum.Definitions.limb_t | [] | Hacl.Bignum4096_32.t_limbs | {
"file_name": "code/bignum/Hacl.Bignum4096_32.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Definitions.limb_t | {
"end_col": 62,
"end_line": 12,
"start_col": 46,
"start_line": 12
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Bignum.MontArithmetic",
"short_module": "MA"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum.SafeAPI",
"short_module": "BS"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum",
"short_module": "BN"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lbignum = Hacl.Bignum.Definitions.lbignum | let lbignum = | false | null | false | Hacl.Bignum.Definitions.lbignum | {
"checked_file": "Hacl.Bignum4096_32.fsti.checked",
"dependencies": [
"prims.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Bignum.SafeAPI.fst.checked",
"Hacl.Bignum.MontArithmetic.fsti.checked",
"Hacl.Bignum.Definitions.fst.checked",
"Hacl.Bignum.Convert.fst.checked",
"Hacl.Bignum.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum4096_32.fsti"
} | [
"total"
] | [
"Hacl.Bignum.Definitions.lbignum"
] | [] | module Hacl.Bignum4096_32
open FStar.Mul
module BN = Hacl.Bignum
module BS = Hacl.Bignum.SafeAPI
module MA = Hacl.Bignum.MontArithmetic
#set-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
let t_limbs: Hacl.Bignum.Definitions.limb_t = Lib.IntTypes.U32
inline_for_extraction noextract
let n_limbs: BN.meta_len t_limbs = 128ul
inline_for_extraction noextract
let n_bytes = n_limbs `FStar.UInt32.mul` 4ul
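// 128 limbs * 4 bytes per 32-bit limb = 512 bytes, i.e., 4096 bits.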
// A static assert that the number of bytes vs number of blocks matches. This is
// important for bn_to_bytes_be which takes a number of bytes, not a number of
// limbs. (It would be nice to fix this.)
let _ = assert_norm (Hacl.Bignum.Definitions.blocks n_bytes 4ul = n_limbs)
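// And a static check that bits-per-limb (32) times n_limbs (128) equals 4096.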
let _ = assert_norm (4096ul = Lib.IntTypes.(size (bits t_limbs)) `FStar.UInt32.mul` n_limbs) | false | true | Hacl.Bignum4096_32.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lbignum : t: Hacl.Bignum.Definitions.limb_t -> len: Lib.IntTypes.size_t -> Type0 | [] | Hacl.Bignum4096_32.lbignum | {
"file_name": "code/bignum/Hacl.Bignum4096_32.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | t: Hacl.Bignum.Definitions.limb_t -> len: Lib.IntTypes.size_t -> Type0 | {
"end_col": 45,
"end_line": 28,
"start_col": 14,
"start_line": 28
} |
|
Prims.Tot | val n_limbs:BN.meta_len t_limbs | [
{
"abbrev": true,
"full_module": "Hacl.Bignum.MontArithmetic",
"short_module": "MA"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum.SafeAPI",
"short_module": "BS"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum",
"short_module": "BN"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let n_limbs: BN.meta_len t_limbs = 128ul | val n_limbs:BN.meta_len t_limbs
let n_limbs:BN.meta_len t_limbs = | false | null | false | 128ul | {
"checked_file": "Hacl.Bignum4096_32.fsti.checked",
"dependencies": [
"prims.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Bignum.SafeAPI.fst.checked",
"Hacl.Bignum.MontArithmetic.fsti.checked",
"Hacl.Bignum.Definitions.fst.checked",
"Hacl.Bignum.Convert.fst.checked",
"Hacl.Bignum.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum4096_32.fsti"
} | [
"total"
] | [
"FStar.UInt32.__uint_to_t"
] | [] | module Hacl.Bignum4096_32
open FStar.Mul
module BN = Hacl.Bignum
module BS = Hacl.Bignum.SafeAPI
module MA = Hacl.Bignum.MontArithmetic
#set-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
let t_limbs: Hacl.Bignum.Definitions.limb_t = Lib.IntTypes.U32 | false | true | Hacl.Bignum4096_32.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val n_limbs:BN.meta_len t_limbs | [] | Hacl.Bignum4096_32.n_limbs | {
"file_name": "code/bignum/Hacl.Bignum4096_32.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.meta_len Hacl.Bignum4096_32.t_limbs | {
"end_col": 40,
"end_line": 15,
"start_col": 35,
"start_line": 15
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Bignum.MontArithmetic",
"short_module": "MA"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum.SafeAPI",
"short_module": "BS"
},
{
"abbrev": true,
"full_module": "Hacl.Bignum",
"short_module": "BN"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let n_bytes = n_limbs `FStar.UInt32.mul` 4ul | let n_bytes = | false | null | false | n_limbs `FStar.UInt32.mul` 4ul | {
"checked_file": "Hacl.Bignum4096_32.fsti.checked",
"dependencies": [
"prims.fst.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Bignum.SafeAPI.fst.checked",
"Hacl.Bignum.MontArithmetic.fsti.checked",
"Hacl.Bignum.Definitions.fst.checked",
"Hacl.Bignum.Convert.fst.checked",
"Hacl.Bignum.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum4096_32.fsti"
} | [
"total"
] | [
"FStar.UInt32.mul",
"Hacl.Bignum4096_32.n_limbs",
"FStar.UInt32.__uint_to_t"
] | [] | module Hacl.Bignum4096_32
open FStar.Mul
module BN = Hacl.Bignum
module BS = Hacl.Bignum.SafeAPI
module MA = Hacl.Bignum.MontArithmetic
#set-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
let t_limbs: Hacl.Bignum.Definitions.limb_t = Lib.IntTypes.U32
inline_for_extraction noextract
let n_limbs: BN.meta_len t_limbs = 128ul | false | true | Hacl.Bignum4096_32.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val n_bytes : FStar.UInt32.t | [] | Hacl.Bignum4096_32.n_bytes | {
"file_name": "code/bignum/Hacl.Bignum4096_32.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | FStar.UInt32.t | {
"end_col": 44,
"end_line": 18,
"start_col": 14,
"start_line": 18
} |
|
Prims.Tot | val carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63 | val carry_uint64 (a b: uint_t 64) : Tot (uint_t 64)
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) = | false | null | false | let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"Prims.nat",
"FStar.UInt.shift_right",
"FStar.UInt.sub_mod",
"FStar.UInt.logor",
"FStar.UInt.logxor"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"] | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) | [] | FStar.UInt128.carry_uint64 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64 -> FStar.UInt.uint_t 64 | {
"end_col": 44,
"end_line": 48,
"start_col": 53,
"start_line": 43
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63 | let carry_bv (a b: uint_t 64) = | false | null | false | bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"FStar.BV.bvshr",
"FStar.BV.bvxor",
"FStar.BV.int2bv",
"FStar.BV.bvor",
"FStar.BV.bvsub",
"FStar.BV.bv_t"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val carry_bv : a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64 -> FStar.BV.bv_t 64 | [] | FStar.UInt128.carry_bv | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64 -> FStar.BV.bv_t 64 | {
"end_col": 12,
"end_line": 55,
"start_col": 4,
"start_line": 53
} |
|
Prims.Tot | val lem_ult_2 (a b: uint_t 64) : squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'") | val lem_ult_2 (a b: uint_t 64) : squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
let lem_ult_2 (a b: uint_t 64) : squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b) = | false | null | true | FStar.Tactics.Effect.assert_by_tactic (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
(fun _ ->
();
(T.norm [delta_only [`%fact0; `%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'"
)) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"FStar.Tactics.Effect.assert_by_tactic",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_Negation",
"FStar.BV.bvult",
"FStar.BV.int2bv",
"FStar.UInt128.fact0",
"Prims.unit",
"FStar.Tactics.V2.Builtins.set_options",
"FStar.Tactics.V2.Builtins.norm",
"Prims.Cons",
"FStar.Pervasives.norm_step",
"FStar.Pervasives.delta_only",
"Prims.string",
"Prims.Nil",
"Prims.squash"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lem_ult_2 (a b: uint_t 64) : squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b) | [] | FStar.UInt128.lem_ult_2 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64
-> Prims.squash (Prims.op_Negation (FStar.BV.bvult (FStar.BV.int2bv a) (FStar.BV.int2bv b)) ==>
FStar.UInt128.fact0 a b) | {
"end_col": 130,
"end_line": 81,
"start_col": 4,
"start_line": 79
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1 | let fact1 (a b: uint_t 64) = | false | null | false | carry_bv a b == int2bv 1 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"Prims.eq2",
"FStar.BV.bv_t",
"FStar.UInt128.carry_bv",
"FStar.BV.int2bv",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl()) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fact1 : a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64 -> Prims.logical | [] | FStar.UInt128.fact1 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64 -> Prims.logical | {
"end_col": 53,
"end_line": 67,
"start_col": 29,
"start_line": 67
} |
|
FStar.Pervasives.Lemma | val u64_logor_comm (a b: U64.t) : Lemma (U64.logor a b == U64.logor b a) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b) | val u64_logor_comm (a b: U64.t) : Lemma (U64.logor a b == U64.logor b a)
let u64_logor_comm (a b: U64.t) : Lemma (U64.logor a b == U64.logor b a) = | false | null | true | UInt.logor_commutative (U64.v a) (U64.v b) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.UInt.logor_commutative",
"FStar.UInt64.n",
"FStar.UInt64.v",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"FStar.UInt64.logor",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
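(* Informally: int2bv_ult transports the arithmetic comparison a < b
   to the bitvector world, where lem_ult_1 / lem_ult_2 evaluate
   carry_bv to int2bv 1 or int2bv 0. *)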
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
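(* Informal sketch of the bit trick (the proofs below go through
   bitvectors instead). Write m(x) for the top bit of x:
   - if m(a) <> m(b), then m(a ^^ b) = 1, so the final top bit is
     the complement of m(a), i.e. m(b), which is 1 exactly when a < b;
   - if m(a) = m(b), the final top bit reduces to m(a -%^ b), the
     borrow of the 64-bit subtraction, again 1 exactly when a < b.
   The shift by 63 extracts that bit as 0 or 1. *)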
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
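(* Informally, the calc above first replaces the arithmetic definition
   by its bitvector counterpart (carry_uint64_ok), then uses lem_ult to
   evaluate the bitvector expression to 1 or 0 according to a < b. *)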
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
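(* Informally: 128-bit addition is limb-wise 64-bit addition, with
   carry_sum_ok identifying the carry out of the low limb with
   (U64.v a.low + U64.v b.low) / pow2 64. *)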
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
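(* Informal summary of the calc above: the high limb times pow2 64,
   taken mod pow2 128, absorbs the low-limb carry; mod_add_small then
   merges the low remainder back in, and mod_spec_rew_n reassembles
   (v a + v b) % pow2 128. *)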
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
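(* Informally: carry a.low l is 1 exactly when the low-limb
   subtraction wrapped, i.e. when U64.v a.low < U64.v b.low, so the
   borrow is subtracted from the high limb. *)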
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
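(* Informally: sub_mod_impl computes the same limbs in every case; the
   lemmas above only case-split on whether the full difference and/or
   the low-limb difference wraps, showing the result is always
   (v a - v b) % pow2 128. *)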
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
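(* Informally: each 128-bit bitwise operation is computed limb-wise.
   The proof views both operands as the append of their 64-bit bit
   vectors (to_vec_v) and uses the corresponding _vec_append lemma to
   commute append with the pointwise operation; logxor, logor and
   lognot below follow the same pattern. *)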
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
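(* Informally: for 0 < s < 64 the new high limb is
   (hi << s) | (lo >> (64 - s)); mod_mul_pow2 bounds the first summand
   and pow2_div_bound the second, so they occupy disjoint bit ranges
   and U64.add cannot overflow. *)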
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
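(* Informally: a shift by s >= 64 moves the low limb (shifted by
   s - 64) into the high limb and zeroes the low limb; a shift by
   s < 64 spills the top s bits of the low limb into the high limb. *)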
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
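(* Informally, the mirror image of shift_left: small shifts spill the
   low s bits of the high limb into the low limb, while shifts by
   s >= 64 move the high limb (shifted by s - 64) into the low limb
   and zero the high limb. *)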
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
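(* The comparisons are lexicographic on (high, low): compare the high
   limbs first and break ties on the low limbs. *)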
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
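(* Informally: U64.eq_mask yields all ones or all zeros per limb, so
   the conjunction of the two limb masks, replicated into both limbs,
   is all ones exactly when a = b; the SMT patterns u64_and_0 and
   u64_1s_and above let Z3 conclude this. *)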
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = () | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_logor_comm (a b: U64.t) : Lemma (U64.logor a b == U64.logor b a) | [] | FStar.UInt128.u64_logor_comm | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt64.t -> b: FStar.UInt64.t
-> FStar.Pervasives.Lemma (ensures FStar.UInt64.logor a b == FStar.UInt64.logor b a) | {
"end_col": 44,
"end_line": 854,
"start_col": 2,
"start_line": 854
} |
FStar.Pervasives.Lemma | val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)] | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a) | val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = | false | null | true | UInt.logand_lemma_2 (U64.v a) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.UInt.logand_lemma_2",
"FStar.UInt64.n",
"FStar.UInt64.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
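This branchless formula is easy to sanity-check outside F*. Below is a minimal Python model (the names MASK64 and constant_time_carry are ours, written for this sketch only), compared against the boolean a < b on a few edge cases:

```python
# Illustrative Python model of the CONSTANT_TIME_CARRY trick; not from
# the F* file itself.
MASK64 = (1 << 64) - 1

def constant_time_carry(a: int, b: int) -> int:
    sub = (a - b) & MASK64                      # wrapping 64-bit subtraction
    return (a ^ ((a ^ b) | (sub ^ b))) >> 63    # sign bit of the mix

for a, b in [(0, 0), (0, 1), (1, 0), (5, 7), (MASK64, 0), (0, MASK64)]:
    assert constant_time_carry(a, b) == (1 if a < b else 0)
```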
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
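The v / uint_to_t pair is just the base-2^64 positional encoding of a 128-bit number as two 64-bit limbs. A tiny Python round-trip sketch (helper names are ours, illustrative only):

```python
# Illustrative model of v / uint_to_t: two 64-bit limbs <-> one 128-bit int.
POW64 = 1 << 64

def v(low: int, high: int) -> int:
    return low + high * POW64

def uint_to_t(x: int) -> tuple:
    return x % POW64, x // POW64                # (low, high)

x = 0x0123456789ABCDEFFEDCBA9876543210
assert v(*uint_to_t(x)) == x                    # div_mod: x/k*k + x%k == x
```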
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
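Concretely, `add` chains a wrapping low-limb addition with its carry into the high limb. A hedged Python sketch of the same computation (limb pairs are (low, high); names are ours):

```python
# Illustrative model of `add`, assuming the true sum fits in 128 bits.
POW64 = 1 << 64

def add(a, b):                                  # a, b = (low, high)
    l = (a[0] + b[0]) % POW64                   # U64.add_mod a.low b.low
    c = (a[0] + b[0]) // POW64                  # carry l b.low
    return (l, a[1] + b[1] + c)

a, b = (POW64 - 1, 3), (1, 4)                   # low limbs carry over
low, high = add(a, b)
assert low + high * POW64 == (a[0] + a[1]*POW64) + (b[0] + b[1]*POW64)
```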
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
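The same computation with a wrapping high limb yields addition mod 2^128, which is what the lemma chain above establishes. A quick Python check of the wraparound case (illustrative only; names are ours):

```python
# Illustrative model of `add_mod`: both limbs wrap, so the result is
# (v a + v b) % 2^128.
POW64, POW128 = 1 << 64, 1 << 128

def add_mod(a, b):
    l = (a[0] + b[0]) % POW64
    c = (a[0] + b[0]) // POW64
    return (l, (a[1] + b[1] + c) % POW64)

a, b = (POW64 - 1, POW64 - 1), (1, 0)           # v a = 2^128 - 1, v b = 1
low, high = add_mod(a, b)
assert low + high * POW64 == 0 == (2**128 - 1 + 1) % POW128
```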
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
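Subtraction mirrors addition: the carry of a.low against the wrapped low result acts as a borrow into the high limb. A short Python model of sub_mod (illustrative; names are ours):

```python
# Illustrative model of `sub_mod`: result is (v a - v b) % 2^128.
POW64, POW128 = 1 << 64, 1 << 128

def sub_mod(a, b):
    l = (a[0] - b[0]) % POW64
    borrow = 1 if a[0] < b[0] else 0            # carry a.low l
    return (l, (a[1] - b[1] - borrow) % POW64)

a, b = (0, 0), (1, 0)                           # 0 - 1 wraps around
low, high = sub_mod(a, b)
assert low + high * POW64 == (0 - 1) % POW128 == POW128 - 1
```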
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
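The append lemmas above are exactly why bitwise operations can be computed limb-wise. A one-line Python check (illustrative only):

```python
# Illustrative check: ANDing the limbs separately equals ANDing the values.
POW64 = 1 << 64
a, b = (0xFF00, 0x1234), (0x0FF0, 0xFFFF)       # (low, high) pairs
assert (a[0] & b[0]) + (a[1] & b[1]) * POW64 == \
       (a[0] + a[1]*POW64) & (b[0] + b[1]*POW64)
```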
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
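Putting the two cases together: small shifts spill bits from the low limb into the high limb, large shifts move the low limb wholesale. A Python sketch of the combined shift_left (illustrative; mirrors the case split above):

```python
# Illustrative model of `shift_left` for s in [0, 128).
POW64, POW128 = 1 << 64, 1 << 128

def shift_left(a, s):
    low, high = a
    if s == 0:
        return a
    if s < 64:                                  # shift_left_small
        return ((low << s) % POW64,
                ((high << s) % POW64) + (low >> (64 - s)))
    return (0, (low << (s - 64)) % POW64)       # shift_left_large

a = (0x0123456789ABCDEF, 0xFEDCBA9876543210)
for s in (0, 1, 63, 64, 127):
    low, high = shift_left(a, s)
    assert low + high*POW64 == ((a[0] + a[1]*POW64) << s) % POW128
```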
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
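shift_right is the mirror image: small shifts pull bits from the high limb down into the low limb, large shifts keep only the high limb. A matching Python sketch (illustrative; names are ours):

```python
# Illustrative model of `shift_right` for s in [0, 128): v r == v a / 2^s.
POW64 = 1 << 64

def shift_right(a, s):
    low, high = a
    if s == 0:
        return a
    if s < 64:                                  # shift_right_small
        return ((low >> s) + ((high << (64 - s)) % POW64), high >> s)
    return (high >> (s - 64), 0)                # shift_right_large

a = (0x0123456789ABCDEF, 0xFEDCBA9876543210)
for s in (0, 1, 63, 64, 127):
    low, high = shift_right(a, s)
    assert low + high*POW64 == (a[0] + a[1]*POW64) >> s
```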
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
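These comparisons are lexicographic on (high, low), which agrees with numeric order because the high limb is weighted by 2^64. A brief Python check (illustrative only):

```python
# Illustrative check: limb-wise lexicographic `lt` matches numeric order.
import itertools
POW64 = 1 << 64

def lt(a, b):
    return a[1] < b[1] or (a[1] == b[1] and a[0] < b[0])

vals = [(0, 0), (1, 0), (0, 1), (POW64 - 1, 0), (0, POW64 - 1)]
for a, b in itertools.product(vals, repeat=2):
    assert lt(a, b) == (a[0] + a[1]*POW64 < b[0] + b[1]*POW64)
```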
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)] | [] | FStar.UInt128.u64_1s_and | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt64.t -> b: FStar.UInt64.t
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt64.v a = Prims.pow2 64 - 1 /\ FStar.UInt64.v b = Prims.pow2 64 - 1 ==>
FStar.UInt64.v (FStar.UInt64.logand a b) = Prims.pow2 64 - 1)
[SMTPat (FStar.UInt64.logand a b)] | {
"end_col": 50,
"end_line": 834,
"start_col": 21,
"start_line": 834
} |
FStar.Pervasives.Lemma | val shift_left_large_val (#n1 #n2: nat) (a1: UInt.uint_t n1) (a2: UInt.uint_t n2) (s: nat)
: Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1 + s))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s | val shift_left_large_val (#n1 #n2: nat) (a1: UInt.uint_t n1) (a2: UInt.uint_t n2) (s: nat)
: Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1 + s)))
let shift_left_large_val (#n1 #n2: nat) (a1: UInt.uint_t n1) (a2: UInt.uint_t n2) (s: nat)
: Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1 + s))) = | false | null | true | Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.UInt.uint_t",
"FStar.Math.Lemmas.pow2_plus",
"Prims.unit",
"FStar.Math.Lemmas.paren_mul_right",
"Prims.pow2",
"FStar.Math.Lemmas.distributivity_add_left",
"FStar.Mul.op_Star",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"Prims.int",
"Prims.op_Addition",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
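mod_add_small is the usual "no wraparound" refinement of modular distributivity; a numeric spot-check in Python (illustrative only):

```python
# Illustrative spot-check of mod_add_small's statement.
for (n1, n2, k) in [(3, 5, 10), (100, 3, 64), (1, 1, 2**64)]:
    if n1 % k + n2 % k < k:                     # the lemma's precondition
        assert n1 % k + n2 % k == (n1 + n2) % k
```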
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
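shift_past_mod says that multiplying by 2^k2 with k2 >= k1 leaves nothing below bit k1. A numeric spot-check (illustrative only):

```python
# Illustrative spot-check of shift_past_mod: (n * 2^k2) % 2^k1 == 0
# whenever k2 >= k1.
for (n, k1, k2) in [(13, 3, 3), (7, 5, 64), (1, 0, 0)]:
    assert (n * 2**k2) % 2**k1 == 0
```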
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) : | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_left_large_val (#n1 #n2: nat) (a1: UInt.uint_t n1) (a2: UInt.uint_t n2) (s: nat)
: Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1 + s))) | [] | FStar.UInt128.shift_left_large_val | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a1: FStar.UInt.uint_t n1 -> a2: FStar.UInt.uint_t n2 -> s: Prims.nat
-> FStar.Pervasives.Lemma
(ensures
(a1 + a2 * Prims.pow2 n1) * Prims.pow2 s == a1 * Prims.pow2 s + a2 * Prims.pow2 (n1 + s)) | {
"end_col": 21,
"end_line": 499,
"start_col": 2,
"start_line": 497
} |
Prims.Tot | val plh (x y: U64.t) : n: UInt.uint_t 64 {n < pow2 64 - pow2 32 - 1} | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y)) | val plh (x y: U64.t) : n: UInt.uint_t 64 {n < pow2 64 - pow2 32 - 1}
let plh (x y: U64.t) : n: UInt.uint_t 64 {n < pow2 64 - pow2 32 - 1} = | false | null | false | mul32_bound (l32 (U64.v x)) (h32 (U64.v y)) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"FStar.UInt128.mul32_bound",
"FStar.UInt128.l32",
"FStar.UInt64.v",
"FStar.UInt128.h32",
"FStar.UInt.uint_t",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Subtraction",
"Prims.pow2"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
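(* For example, pow2 64 + 5 is represented with low = 5 and high = 1, since
   v { low = 5; high = 1 } = 5 + 1 * pow2 64.  The check below is an
   illustrative sanity assertion of that arithmetic, not part of the
   original interface. *)
let _ = assert_norm (5 + 1 * pow2 64 = 18446744073709551621)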
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... it seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
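(* A concrete instance of the carry propagation (illustrative values):
   adding { low = pow2 64 - 1; high = 0 } to { low = 1; high = 0 } wraps the
   low limb to 0, carry 0 1 reports the overflow as 1, and the result is
   { low = 0; high = 1 }, i.e. pow2 64, as required. *)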
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can still fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
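(* For instance (illustrative values): sub_mod of { low = 0; high = 0 } and
   { low = 1; high = 0 } wraps.  The low limb becomes pow2 64 - 1, the borrow
   carry 0 (pow2 64 - 1) = 1 is then subtracted from the high limb, and the
   result is the all-ones value pow2 128 - 1 = (0 - 1) % pow2 128. *)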
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
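(* The proof pattern here, and for logxor, logor and lognot below, is always
   the same: compute the operation limbwise, then use the corresponding
   *_vec_append lemma to show that the limbwise result agrees with the
   128-bit bitvector semantics.  Concretely (illustrative values), logand of
   { low = 0xFF; high = 0xF0 } and { low = 0x0F; high = 0xFF } is
   { low = 0x0F; high = 0xF0 }. *)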
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
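(* Intuitively, add_u64_shift_left assembles the new high limb of a left
   shift: the surviving bits of hi (hi * pow2 s, truncated mod pow2 64) are
   combined with the bits that spill over from lo (lo / pow2 (64 - s)).  The
   two parts cannot collide, because the spilled part is < pow2 s while the
   truncated part is a multiple of pow2 s, which is why a plain U64.add
   suffices. *)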
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
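(* E.g. (illustrative values): shifting { low = pow2 63; high = 0 } left by 1
   clears the low limb and moves its top bit into the high limb, giving
   { low = 0; high = 1 } = pow2 64 = (pow2 63 * 2) % pow2 128. *)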
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
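(* A quick sanity check of the dispatch (illustrative): shifting right by
   exactly 64 takes the large branch with s - 64 = 0, so the result is just
   { low = a.high; high = 0 }, matching v a / pow2 64 = U64.v a.high. *)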
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
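(* Masks like this one enable branchless selection.  A minimal sketch (the
   helper name is ours, not part of the interface): the expression below
   evaluates to x when m is all ones and to y when m is all zeros, with no
   data-dependent branch on the comparison. *)
let select_sketch (x y m: t) : t = logor (logand x m) (logand y (lognot m))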
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
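(* This is schoolbook multiplication in base pow2 32: writing
   x = x1 * pow2 32 + x0, we have x * y = (x1 * y) * pow2 32 + x0 * y, and
   x0 * y is itself split as x0yh * pow2 32 + x0yl so the shifted parts can
   be recombined.  No 64-bit addition overflows: each 32x32 product is below
   pow2 64 - pow2 32 - 1 by u32_product_bound, leaving room to add the
   32-bit digit x0yh. *)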
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
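(* l32 and h32 split a 64-bit value into its two base-pow2 32 digits, so that
   x = h32 x * pow2 32 + l32 x.  The four partial products of two such splits
   (low with low, low with high, high with low, high with high) are what pll
   below and companions such as plh compute. *)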
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val plh (x y: U64.t) : n: UInt.uint_t 64 {n < pow2 64 - pow2 32 - 1} | [] | FStar.UInt128.plh | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt64.t -> y: FStar.UInt64.t
-> n: FStar.UInt.uint_t 64 {n < Prims.pow2 64 - Prims.pow2 32 - 1} | {
"end_col": 45,
"end_line": 979,
"start_col": 2,
"start_line": 979
} |
FStar.Pervasives.Lemma | val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)] | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a) | val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = | false | null | true | UInt.logor_lemma_1 (U64.v a) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.UInt.logor_lemma_1",
"FStar.UInt64.n",
"FStar.UInt64.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the times, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
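(* Reading (added, not in the original file): dividing the two-limb value by
   pow2 s (s < 64) keeps the top 64 - s bits of the low limb and re-weights
   the high limb by pow2 (64 - s); this is exactly the decomposition that
   shift_right_small uses below. *)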
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
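(* Illustrative sanity check, added for this write-up and not part of the
   original file: shifting right by exactly 64 extracts the high limb. The
   name shift_right_example is hypothetical; the proof follows from the
   interface postcondition alone. *)
let shift_right_example (a: t) : Lemma (v (shift_right a 64ul) == v a / pow2 64) =
  ()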
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
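(* Note (added): all four comparisons implement the lexicographic order on
   (high, low), which coincides with comparing v a against v b, because
   v x = U64.v x.low + U64.v x.high * pow2 64 and the low limb is strictly
   below pow2 64. *)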
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
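(* Hedged usage sketch, not part of the original file: an all-ones/all-zeros
   mask enables branchless selection. The helper name select128 is
   hypothetical; it returns x when v a = v b and y otherwise. *)
let select128 (a b x y: t) : t =
  let m = eq_mask a b in
  logor (logand m x) (logand (lognot m) y)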
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
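(* Note (added): the [SMTPat ...] annotations register these logand/logor
   unit and absorption facts as quantifier triggers, so Z3 applies them
   automatically whenever matching terms appear in later proofs. *)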
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)] | [] | FStar.UInt128.u64_or_0 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt64.t -> b: FStar.UInt64.t
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt64.v a = 0 /\ FStar.UInt64.v b = 0 ==> FStar.UInt64.v (FStar.UInt64.logor a b) = 0
) [SMTPat (FStar.UInt64.logor a b)] | {
"end_col": 47,
"end_line": 869,
"start_col": 19,
"start_line": 869
} |
FStar.Pervasives.Lemma | val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k | val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = | false | null | true | Math.modulo_distributivity n1 n2 k | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.b2t",
"Prims.op_GreaterThan",
"FStar.Math.Lemmas.modulo_distributivity",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
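(* Note (added): this is the classic branchless borrow detector -- bit 63 of
   a ^ ((a ^ b) | ((a - b) ^ b)) is set exactly when a < b. The equivalence
   is not assumed here; it is established against the bitvector version by
   carry_uint64_ok and lem_ult below. *)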
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
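(* Worked instance (added for illustration): for x = pow2 64 + 5, uint_to_t
   yields low = 5 and high = 1, and v recovers 5 + 1 * pow2 64 = x. *)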
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
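(* Note (added): v_inj states that the (low, high) representation is unique:
   two uint128 records with the same mathematical value are equal as records,
   which is what lets specifications reason purely in terms of v. *)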
(* A helper used below. The native encoding of bitvectors seems to make
   these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
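(* Worked instance (added for illustration): adding 1 to the value with
   low = pow2 64 - 1 and high = 0 wraps the low limb to 0, carry l b.low
   evaluates to 1, and the result has high = 1, i.e. v = pow2 64. *)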
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
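(* Worked instance (added for illustration): n = 17, k = 5 gives
   17 == 3 * 5 + 2. *)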
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} -> | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k) | [] | FStar.UInt128.mod_add | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n1: Prims.nat -> n2: Prims.nat -> k: Prims.nat{k > 0}
-> FStar.Pervasives.Lemma (ensures (n1 % k + n2 % k) % k == (n1 + n2) % k) | {
"end_col": 56,
"end_line": 228,
"start_col": 22,
"start_line": 228
} |
FStar.Pervasives.Lemma | val u64_logand_comm (a b: U64.t) : Lemma (U64.logand a b == U64.logand b a) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b) | val u64_logand_comm (a b: U64.t) : Lemma (U64.logand a b == U64.logand b a)
let u64_logand_comm (a b: U64.t) : Lemma (U64.logand a b == U64.logand b a) = | false | null | true | UInt.logand_commutative (U64.v a) (U64.v b) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.UInt.logand_commutative",
"FStar.UInt64.n",
"FStar.UInt64.v",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"FStar.UInt64.logand",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A helper used below. The native encoding of bitvectors seems to make
   these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can still fail
// about 1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
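(* Note (added): the calc block above rewrites the high limb modulo pow2 128
   so the low-limb carry can be folded in; mod_add_small and mod_spec_rew_n
   then reassemble the two limbs into (v a + v b) % pow2 128. *)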
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
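(* Note (added): sub_mod case-splits on the sign of v a - v b -- the result
   is exact when non-negative (sub_mod_pos_ok) and wraps by pow2 128
   otherwise (sub_mod_wrap_ok), matching two's-complement behavior. *)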
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
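(* Worked instance (added for illustration): append_uint #4 #4 0x3 0xA
   places 0xA above 0x3, giving 0x3 + 0xA * 16 = 0xA3. *)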
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
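(* Note (added): logxor, logor and lognot below follow the same template as
   logand -- compute limb-wise, then use to_vec_v plus the corresponding
   *_vec_append lemma to show the 128-bit bitvector semantics agrees. *)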
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_logand_comm (a b: U64.t) : Lemma (U64.logand a b == U64.logand b a) | [] | FStar.UInt128.u64_logand_comm | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt64.t -> b: FStar.UInt64.t
-> FStar.Pervasives.Lemma (ensures FStar.UInt64.logand a b == FStar.UInt64.logand b a) | {
"end_col": 45,
"end_line": 818,
"start_col": 2,
"start_line": 818
} |
FStar.Pervasives.Lemma | val v_inj (x1 x2: t): Lemma (requires (v x1 == v x2)) (ensures (x1 == x2)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
() | val v_inj (x1 x2: t): Lemma (requires (v x1 == v x2)) (ensures (x1 == x2))
let v_inj (x1 x2: t) : Lemma (requires (v x1 == v x2)) (ensures x1 == x2) = | false | null | true | assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
() | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt128.t",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"FStar.UInt128.uint_to_t",
"FStar.UInt128.v",
"FStar.UInt.uint_t",
"FStar.UInt128.n",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val v_inj (x1 x2: t): Lemma (requires (v x1 == v x2)) (ensures (x1 == x2)) | [] | FStar.UInt128.v_inj | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x1: FStar.UInt128.t -> x2: FStar.UInt128.t
-> FStar.Pervasives.Lemma (requires FStar.UInt128.v x1 == FStar.UInt128.v x2) (ensures x1 == x2) | {
"end_col": 3,
"end_line": 133,
"start_col": 1,
"start_line": 130
} |
FStar.Pervasives.Lemma | val shift_t_mod_val (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
() | val shift_t_mod_val (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128)
let shift_t_mod_val (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) = | false | null | true | let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
() | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt128.t",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.unit",
"FStar.Math.Lemmas.paren_mul_right",
"Prims.pow2",
"FStar.Math.Lemmas.pow2_plus",
"FStar.UInt128.shift_t_mod_val'",
"FStar.UInt.uint_t",
"FStar.UInt64.v",
"FStar.UInt128.__proj__Mkuint128__item__high",
"FStar.UInt128.__proj__Mkuint128__item__low",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"Prims.int",
"Prims.op_Modulus",
"FStar.Mul.op_Star",
"FStar.UInt128.v",
"Prims.op_Addition",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
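(* Editorial worked example (not from the original source): the expression
   computes the borrow of a - b, i.e. its top bit is 1 exactly when a < b;
   lem_ult below proves this for 64 bits. A scaled-down 4-bit analog
   (shift by 3 instead of 63) with a = 0b0010 (2) and b = 0b0101 (5):
     a ^^ b                       = 0b0111
     (a -%^ b) ^^ b               = 0b1101 ^^ 0b0101 = 0b1000
     (a ^^ b) |^ ((a -%^ b) ^^ b) = 0b1111
     a ^^ 0b1111                  = 0b1101, and 0b1101 >>^ 3 = 1 since 2 < 5.
   Swapping a and b yields 0 by the same computation. *)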
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
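(* Illustrative trace (editorial, not in the original file): adding
   a = { low = 0xffffffffffffffff; high = 0 } and b = { low = 1; high = 0 }:
   l = U64.add_mod a.low b.low = 0, and carry l b.low = 1 because
   U64.v l = 0 < U64.v b.low = 1, so the result is { low = 0; high = 1 },
   i.e. v r = pow2 64 = v a + v b as the postcondition demands. *)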
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
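(* Numeric sanity check (editorial comment): with n = 7, k1 = 4, k2 = 3,
   (7 % 3) * 4 = 4 and (7 * 4) % (4 * 3) = 28 % 12 = 4, matching the lemma. *)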
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
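(* Numeric sanity check (editorial comment): with n1 = 7, n2 = 9, k = 20,
   7 % 20 + 9 % 20 = 16 < 20, and (7 + 9) % 20 = 16, as the lemma states. *)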
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
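(* Illustrative example (editorial, not in the original file): with
   n1 = n2 = 2, num1 = 3 (0b11) and num2 = 2 (0b10),
   append_uint 3 2 = 3 + 2 * pow2 2 = 11 = 0b1011: num1 occupies the low
   n1 bits and num2 the high n2 bits, which is the layout to_vec_append
   below makes precise. *)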
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
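(* Illustrative instance (editorial comment): with k1 = 3 and k2 = 7,
   n * pow2 7 = (n * pow2 4) * pow2 3 is a multiple of pow2 3, so
   (n * pow2 7) % pow2 3 = 0, which is the same factorization the proof uses. *)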
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
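(* Illustrative trace (editorial, not in the original file): with hi = 1,
   lo = pow2 63 and s = 1ul, high = U64.shift_left hi 1ul = 2 and
   low = U64.shift_right lo 63ul = 1, so the result is 3, matching the
   postcondition (1 * pow2 1) % pow2 64 + pow2 63 / pow2 63 = 2 + 1. *)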
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) : | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_t_mod_val (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) | [] | FStar.UInt128.shift_t_mod_val | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: Prims.nat{s < 64}
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt128.v a * Prims.pow2 s % Prims.pow2 128 ==
FStar.UInt64.v (Mkuint128?.low a) * Prims.pow2 s +
(FStar.UInt64.v (Mkuint128?.high a) * Prims.pow2 64) * Prims.pow2 s % Prims.pow2 128) | {
"end_col": 4,
"end_line": 674,
"start_col": 78,
"start_line": 668
} |
Prims.Tot | val u32_32:x: U32.t{U32.v x == 32} | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32 | val u32_32:x: U32.t{U32.v x == 32}
let u32_32:x: U32.t{U32.v x == 32} = | false | null | false | U32.uint_to_t 32 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt32.uint_to_t"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
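(* Illustrative trace (editorial, not in the original file): shifting
   a = { low = pow2 63; high = 0 } left by 1ul gives
   low = U64.shift_left a.low 1ul = 0 and
   high = add_u64_shift_left_respec 0 (pow2 63) 1ul = 1, i.e. the result
   represents pow2 64: the top bit of the low limb crosses into the high limb. *)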
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
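(* Editorial note with an example (not in the original file): the dispatch
   picks the limb-local path for s < 64 and the limb-crossing path otherwise.
   For s = 64ul, shift_left_large returns { low = 0; high = a.low }, i.e.
   shifting by exactly 64 moves the low limb into the high limb
   (a.high is discarded modulo pow2 128). *)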
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
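(* Editorial note (not in the original file): all four comparisons are
   lexicographic on (high, low). For instance
   gt { low = 0; high = 1 } { low = 0xffffffffffffffff; high = 0 }
   holds because the high limbs already differ, matching
   v = pow2 64 > pow2 64 - 1. *)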
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
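(* Editorial sketch (not part of the original verified file): a typical use
   of eq_mask is a branch-free select built from the bitwise wrappers above.
   The name select_if_eq is hypothetical and the ensures clause is omitted
   to keep the sketch minimal. *)
let select_if_eq (a b x y: t) : t =
  (* m is all ones when v a = v b, all zeros otherwise *)
  let m = eq_mask a b in
  (* keeps x under an all-ones mask, y under an all-zeros mask *)
  logor (logand x m) (logand y (lognot m))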
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
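(* gte_mask is branchless: mask_hi_gte is all ones exactly when the high
   words already settle the comparison (a.high > b.high, encoded as
   gte AND NOT eq on the high words), and mask_lo_gte covers the tie
   case, where the high words are equal and the low words decide. For
   example, a = pow2 64 against b = 1 takes the first path, while
   a = b = 1 takes the second. *)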
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
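(* Widening and narrowing conversions: uint64_to_uint128 zero-extends its
   argument, while uint128_to_uint64 truncates, returning v a % pow2 64
   (the low word). *)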
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
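(* Masking with 0xffffffff keeps exactly the low 32 bits, which is the
   same as reducing mod pow2 32; e.g. u64_mod_32 of 0x100000003 is 3.
   UInt.logand_mask supplies the logand a (pow2 32 - 1) == a % pow2 32
   fact to the solver. *)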
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
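(* mul32_digits is the schoolbook identity splitting x into its 32-bit
   digits: x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y.
   mul32 later uses it to recombine its two partial products. *)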
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = () | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u32_32:x: U32.t{U32.v x == 32} | [] | FStar.UInt128.u32_32 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt32.t{FStar.UInt32.v x == 32} | {
"end_col": 54,
"end_line": 914,
"start_col": 38,
"start_line": 914
} |
FStar.Pervasives.Lemma | val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k | val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k = | false | null | true | mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1 % k + n2 % k) k | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.b2t",
"Prims.op_GreaterThan",
"FStar.Math.Lemmas.small_modulo_lemma_1",
"Prims.op_Addition",
"Prims.op_Modulus",
"Prims.unit",
"FStar.UInt128.mod_add"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
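(* This is a branch-free borrow: the top bit of
   a ^ ((a ^ b) | ((a - b) ^ b)) is set exactly when a < b, so shifting
   right by 63 produces 1 on borrow and 0 otherwise. The lemmas above
   (carry_uint64_ok, lem_ult) justify this by reduction to bit vectors. *)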
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
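(* 128-bit addition is two 64-bit additions plus a carry: the low words
   are added mod pow2 64, and carry l b.low, which is 1 exactly when that
   addition wrapped, is folded into the sum of the high words. The
   precondition v a + v b < pow2 128 keeps the high-word sum in range. *)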
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k)) | [] | FStar.UInt128.mod_add_small | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n1: Prims.nat -> n2: Prims.nat -> k: Prims.nat{k > 0}
-> FStar.Pervasives.Lemma (requires n1 % k + n2 % k < k)
(ensures n1 % k + n2 % k == (n1 + n2) % k) | {
"end_col": 43,
"end_line": 235,
"start_col": 2,
"start_line": 234
} |
Prims.Tot | val h32 (x: UInt.uint_t 64) : UInt.uint_t 32 | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32 | val h32 (x: UInt.uint_t 64) : UInt.uint_t 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = | false | null | false | x / pow2 32 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"Prims.op_Division",
"Prims.pow2"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
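(* Subtraction mirrors addition: carry a.low l is 1 exactly when the
   low-word subtraction wrapped (a.low < b.low), and that borrow is then
   subtracted from the difference of the high words. *)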
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
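(* sub_mod always runs the same word-level computation, sub_mod_impl;
   the case split above is proof-only. The result is exact when
   v a >= v b and wraps by exactly pow2 128 otherwise, which together
   give (v a - v b) % pow2 128. *)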
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
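(* Each bitwise operation above is computed wordwise and verified by the
   same argument: to_vec_v exposes a 128-bit value as the concatenation
   of its words' bit vectors, and the corresponding *_vec_append lemma
   shows the pointwise operation commutes with that concatenation. *)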
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
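(* Like shift_right further below, shift_left dispatches on whether the
   shift crosses the 64-bit word boundary: small shifts move bits from
   the low word into the high word, while shifts of 64 or more place the
   (shifted) low word in the high word and zero out the low word. *)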
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
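(* product_bound bounds a product of two naturals below k by
   k*k - 2*k + 1, i.e. (k-1)^2; instantiated at k = pow2 32 this is the
   slack that lets the 32-bit partial products in mul32 absorb a carry
   without overflowing 64 bits. *)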
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
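(* mul32 computes a 64x32-bit product (at most 96 bits) from two
   32x32 -> 64-bit multiplications: x is split into 32-bit digits x1:x0,
   the partial products x0*y and x1*y are formed, and the high 32 bits
   of x0*y are carried into x1*y before the digits are recombined via
   u32_combine. u32_product_bound shows each partial product fits in
   64 bits, with enough slack to absorb the carry add. *)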
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val h32 (x: UInt.uint_t 64) : UInt.uint_t 32 | [] | FStar.UInt128.h32 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt.uint_t 64 -> FStar.UInt.uint_t 32 | {
"end_col": 58,
"end_line": 968,
"start_col": 47,
"start_line": 968
} |
Prims.Pure | val shift_left: a:t -> s:UInt32.t -> Pure t
(requires (U32.v s < n))
(ensures (fun c -> v c = ((v a * pow2 (UInt32.v s)) % pow2 n))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s | val shift_left: a:t -> s:UInt32.t -> Pure t
(requires (U32.v s < n))
(ensures (fun c -> v c = ((v a * pow2 (UInt32.v s)) % pow2 n)))
let shift_left a s = | false | null | false | if (U32.lt s u32_64) then shift_left_small a s else shift_left_large a s | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt128.t",
"FStar.UInt32.t",
"FStar.UInt32.lt",
"FStar.UInt128.u32_64",
"FStar.UInt128.shift_left_small",
"Prims.bool",
"FStar.UInt128.shift_left_large"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
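(* Worked carry example (illustrative): if U64.v a.low = pow2 64 - 1 and
   U64.v b.low = 1, then l wraps to 0, the test behind `carry` (l < b.low)
   detects the wrap, and carry l b.low = 1 is folded into the high limb --
   exactly the (a_l + b_l) / pow2 64 term that carry_sum_ok exposes. *)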
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
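(* Borrow intuition (informal): l = a.low -%^ b.low exceeds a.low exactly
   when the low subtraction wrapped, e.g. a.low = 1, b.low = 2 gives
   l = pow2 64 - 1 > 1; so carry a.low l is 1 precisely on a borrow, and it
   is subtracted from the high limb. *)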
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
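(* Small instance (illustrative): append_uint #4 #4 0x3 0xA places num2 in
   the high bits, giving 0x3 + 0xA * 16 = 0xA3; to_vec_append below states
   the corresponding fact at the bit-vector level. *)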
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
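(* The same three-step pattern is reused for logxor, logor and lognot below
   (informal sketch): compute the operation limb by limb, view the 128-bit
   value as the appended limb vectors via to_vec_v, and apply the
   corresponding *_vec_append lemma to show that a pointwise bitwise
   operation commutes with Seq.append. *)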
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
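(* Small-width analogy (illustrative, with 4-bit limbs): for a = a1 + a2 * 16
   and s >= 4, the term a2 * 16 * pow2 s is a multiple of pow2 (4 + s) >= 256,
   so (a * pow2 s) % 256 = (a1 * pow2 s) % 256; e.g. a1 = 3, a2 = 5, s = 4
   gives 83 * 16 % 256 = 48 = 3 * 16. The lemma above is the 64/64 case. *)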
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
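(* Quick numeric check (illustrative): n = 7, e1 = 2, e2 = 3 gives
   7 % 4 * 8 = 24 = pow2 5 - pow2 3, so the stated bound is tight. *)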
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
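(* Informally, add_u64_shift_left hi lo s computes (hi << s) | (lo >> (64 - s)):
   the shifted hi has s zero low bits and the shifted lo is < pow2 s, so the
   two parts cannot overlap and U64.add acts as a bitwise or. This is the
   high limb of a 128-bit left shift by s. *)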
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
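(* Why this holds (informal): k1*m % (k1*k2) is itself a multiple of k1, so
   it is at most k1*k2 - k1, leaving room for any n < k1 without bumping the
   quotient. E.g. n = 1, m = 5, k1 = 2, k2 = 4: 10 / 8 = 1 = 11 / 8. *)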
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
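(* Limb view (illustrative): for 0 < s < 64 the result is
   low = a.low << s and high = (a.high << s) | (a.low >> (64 - s)),
   with add_u64_shift_left_respec packaging the high limb together with the
   arithmetic facts the modular specification needs. *)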
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
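(* For shifts of 64..127 bits (informal picture): the low limb is forced to
   zero and a.low lands in the high limb shifted by s - 64; any bits of
   a.high would be shifted past bit 127 and are dropped by the % pow2 128 in
   the specification. *)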
#pop-options | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_left: a:t -> s:UInt32.t -> Pure t
(requires (U32.v s < n))
(ensures (fun c -> v c = ((v a * pow2 (UInt32.v s)) % pow2 n))) | [] | FStar.UInt128.shift_left | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: FStar.UInt32.t -> Prims.Pure FStar.UInt128.t | {
"end_col": 27,
"end_line": 711,
"start_col": 2,
"start_line": 710
} |
FStar.Pervasives.Lemma | val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1 | val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 = | false | null | true | div_product (k1 * m) k1 k2;
div_product (n + k1 * m) k1 k2;
mul_div_cancel m k1;
assert (k1 * m / k1 == m);
div_plus_multiple n m k1 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.pos",
"FStar.UInt128.div_plus_multiple",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Prims.op_Division",
"FStar.Mul.op_Star",
"FStar.UInt128.mul_div_cancel",
"FStar.UInt128.div_product",
"Prims.op_Addition"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2))) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2))) | [] | FStar.UInt128.div_add_small | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n: Prims.nat -> m: Prims.nat -> k1: Prims.pos -> k2: Prims.pos
-> FStar.Pervasives.Lemma (requires n < k1)
(ensures k1 * m / (k1 * k2) == (n + k1 * m) / (k1 * k2)) | {
"end_col": 26,
"end_line": 576,
"start_col": 2,
"start_line": 572
} |
Prims.Tot | val carry_uint64_ok (a b: uint_t 64) : squash (int2bv (carry_uint64 a b) == carry_bv a b) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl()) | val carry_uint64_ok (a b: uint_t 64) : squash (int2bv (carry_uint64 a b) == carry_bv a b)
let carry_uint64_ok (a b: uint_t 64) : squash (int2bv (carry_uint64 a b) == carry_bv a b) = | false | null | true | FStar.Tactics.Effect.synth_by_tactic (fun _ ->
(T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl ())) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt.uint_t",
"Prims.squash",
"Prims.eq2",
"FStar.BV.bv_t",
"FStar.BV.int2bv",
"FStar.UInt128.carry_uint64",
"FStar.UInt128.carry_bv"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val carry_uint64_ok (a b: uint_t 64) : squash (int2bv (carry_uint64 a b) == carry_bv a b) | [] | FStar.UInt128.carry_uint64_ok | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> b: FStar.UInt.uint_t 64
-> Prims.squash (FStar.BV.int2bv (FStar.UInt128.carry_uint64 a b) == FStar.UInt128.carry_bv a b) | {
"end_col": 18,
"end_line": 65,
"start_col": 4,
"start_line": 59
} |
FStar.Pervasives.Lemma | val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2 | val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 = | false | null | true | Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert ((n % pow2 e1) * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.Math.Lemmas.pow2_plus",
"Prims.unit",
"Prims._assert",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Prims.op_Modulus",
"Prims.pow2",
"Prims.op_Subtraction",
"FStar.Math.Lemmas.lemma_mult_le_right",
"FStar.Math.Lemmas.lemma_mod_lt"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
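(* A borrow out of the low limb occurs exactly when a.low < b.low, hence
   the case split above: in wrap1 the low subtraction itself wraps (adding
   2^64) and the high limb absorbs the borrow; in wrap2 the low subtraction
   is exact and only the high subtraction wraps. *)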
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
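(* Worked example (illustrative): with n1 = n2 = 2, num1 = 3, num2 = 2,
   append_uint 3 2 = 3 + 2 * 4 = 11 = 0b1011; num2's bits land above
   num1's, matching the Seq.append order proved next. *)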
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
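(* The three binary bitwise operations share one recipe: compute limb-wise
   with the 64-bit primitive, view the two limbs as one 128-bit vector via
   to_vec_v, and transport the matching append lemma (logand_vec_append
   etc.) across the Seq.append. logxor and logor below repeat the logand
   argument verbatim. *)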
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
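(* Sanity check (illustrative): n = 3, k1 = 3, k2 = 5 gives
   (3 * 32) % 8 = 96 % 8 = 0, since 2^5 = 2^2 * 2^3 already carries the
   modulus 2^3 as a factor. *)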
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
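(* Intuition: for s >= 64 the high limb contributes a.high * 2^(64+s) with
   64 + s >= 128, so it vanishes modulo 2^128 and only a.low survives. *)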
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2) | [] | FStar.UInt128.mod_mul_pow2 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n: Prims.nat -> e1: Prims.nat -> e2: Prims.nat
-> FStar.Pervasives.Lemma
(ensures (n % Prims.pow2 e1) * Prims.pow2 e2 <= Prims.pow2 (e1 + e2) - Prims.pow2 e2) | {
"end_col": 22,
"end_line": 536,
"start_col": 2,
"start_line": 533
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32 | let mul_wide_high (x y: U64.t) = | false | null | false | phh x y + (phl x y + pll_h x y) / pow2 32 + (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"Prims.op_Addition",
"FStar.UInt128.phh",
"Prims.op_Division",
"FStar.UInt128.phl",
"FStar.UInt128.pll_h",
"Prims.pow2",
"FStar.UInt128.plh",
"Prims.op_Modulus",
"Prims.int"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
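(* Cross-limb left shift, pictorially (illustrative note): the new high
   limb is (hi << s) + (lo >> (64 - s)); the two pieces occupy disjoint
   bit positions, so the U64.add is really a bitwise OR. The spec states
   the same thing arithmetically as (hi * 2^s) % 2^64 + lo / 2^(64-s). *)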
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
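(* Dispatch: shifts below 64 keep part of both limbs; shifts of 64..127
   move a.low into the high limb and zero the low one. Example
   (illustrative): shifting the value 1 left by 64 yields
   { low = 0; high = 1 }, i.e. 2^64. *)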
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
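(* Mirror image of shift_left: for s < 64 the low limb receives a.low >> s
   together with the bottom s bits of a.high moved to the top; for
   s >= 64 the result is just a.high >> (s - 64) sitting in the low limb. *)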
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
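(* These comparisons are lexicographic on (high, low). Example
   (illustrative): for a = { low = 0; high = 1 } and
   b = { low = 0xffffffffffffffff; high = 0 }, lt a b = false because the
   high limbs already decide 2^64 > 2^64 - 1. *)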
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
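(* Branch-free equality: each U64.eq_mask is all-ones or all-zeros, and
   their logand is all-ones only when both limbs agree, so the 128-bit
   mask is 2^128 - 1 exactly when v a = v b, with no data-dependent
   branches. *)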
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
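(* gte decomposes as (a.high > b.high) \/ (a.high = b.high /\
   a.low >= b.low): mask_hi_gte makes the high test strict by masking out
   the equal case, mask_lo_gte handles the tie on the high limbs, and the
   logor merges the two all-ones/all-zeros masks. *)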
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
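(* u32_combine packs two 32-bit digits into one u64:
   (hi % 2^32) * 2^32 + lo. Example (illustrative): hi = 0x5, lo = 0x9
   gives 0x0000000500000009. *)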
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + (hi << 32)
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
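(* mul32 is a 64x32 -> 96-bit product built from two 32x32 -> 64
   multiplies: writing x = x1 * 2^32 + x0, we get
   x * y = (x1 * y) * 2^32 + x0 * y, with the high digit of x0 * y folded
   into x1y. Worked example (illustrative): x = 2^32 + 3, y = 2 gives
   x1y = 2, x0y = 6, so r = { low = 2^33 + 6; high = 0 }. *)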
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
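(* Schoolbook 64x64 -> 128 multiplication in base 2^32:
   x * y = phh * 2^64 + (phl + plh) * 2^32 + pll,
   where pll is further split into digits pll_h * 2^32 + pll_l so that
   every partial sum stays below 2^64 (guaranteed by the
   < pow2 64 - pow2 32 - 1 bounds above). mul_wide_low assembles the low
   64 bits of this sum; the matching high half folds the carries into phh. *)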
let mul_wide_low (x y: U64.t) = (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y | false | true | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul_wide_high : x: FStar.UInt64.t -> y: FStar.UInt64.t -> Prims.int | [] | FStar.UInt128.mul_wide_high | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt64.t -> y: FStar.UInt64.t -> Prims.int | {
"end_col": 57,
"end_line": 995,
"start_col": 2,
"start_line": 993
} |
|
Prims.Tot | val uint64_to_uint128: a:U64.t -> b:t{v b == U64.v a} | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; } | val uint64_to_uint128: a:U64.t -> b:t{v b == U64.v a}
let uint64_to_uint128 (a: U64.t) = | false | null | false | { low = a; high = U64.uint_to_t 0 } | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"FStar.UInt128.Mkuint128",
"FStar.UInt64.uint_to_t",
"FStar.UInt128.t",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"FStar.UInt.size",
"FStar.UInt128.n",
"FStar.UInt64.n",
"FStar.UInt128.v",
"FStar.UInt64.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
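(* Why carry_sum_ok holds (informal, added): let l = (a + b) % pow2 64.
   The 64-bit addition overflowed iff a + b >= pow2 64, which happens iff
   l wrapped past b, i.e. iff l < b -- precisely what carry l b detects,
   and (a + b) / pow2 64 is 1 in exactly that case. *)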
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable as a calc proof, but it can still fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
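(* Proof shape of add_mod (added commentary): the calc block rewrites the
   high limb's contribution U64.v r.high * pow2 64 into full-width products
   modulo pow2 128; mod_add_small then absorbs the low limb (already below
   pow2 64) under the same modulus, and mod_spec_rew_n recombines
   (a_l + b_l) from its quotient and remainder. *)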
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
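(* sub_mod dispatches on the sign of the exact difference: when
   v a - v b >= 0 the result is exact (sub_mod_pos_ok); otherwise it wraps
   by exactly pow2 128 (sub_mod_wrap_ok), matching (v a - v b) % pow2 128. *)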
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
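(* to_vec_v is the key decomposition for the bitwise operators below: a
   128-bit value, viewed as a bit vector, is its high limb's bits followed
   by its low limb's bits. Each pointwise operation is then proved limb by
   limb and reassembled with the corresponding _vec_append lemma. *)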
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
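(* Informally (added): the new high limb of a left shift by s is
   (hi << s) | (lo >> (64 - s)). The lemmas invoked above show the two
   pieces occupy disjoint ranges -- hi * pow2 s % pow2 64 is a multiple of
   pow2 s while lo / pow2 (64 - s) is below pow2 s -- so the U64.add cannot
   overflow. *)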
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
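(* Illustrative use (values assumed for the example): shift_left
   (uint_to_t 1) (U32.uint_to_t 64) takes the large branch and returns the
   value with low = 0 and high = 1, i.e. pow2 64. *)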
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
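(* eq_mask is a branch-free (constant-time) equality test: the per-limb
   masks are ANDed, so the result is all ones exactly when both limbs
   agree. A typical illustrative use is branchless selection,
     logor (logand mask x) (logand (lognot mask) y)
   which picks x when mask is all ones and y when it is zero. *)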
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; } | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint64_to_uint128: a:U64.t -> b:t{v b == U64.v a} | [] | FStar.UInt128.uint64_to_uint128 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt64.t -> b: FStar.UInt128.t{FStar.UInt128.v b == FStar.UInt64.v a} | {
"end_col": 68,
"end_line": 894,
"start_col": 36,
"start_line": 894
} |
FStar.Pervasives.Lemma | val product_bound (a b: nat) (k: pos)
: Lemma (requires (a < k /\ b < k)) (ensures a * b <= k * k - 2 * k + 1) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1) | val product_bound (a b: nat) (k: pos)
: Lemma (requires (a < k /\ b < k)) (ensures a * b <= k * k - 2 * k + 1)
let product_bound (a b: nat) (k: pos)
: Lemma (requires (a < k /\ b < k)) (ensures a * b <= k * k - 2 * k + 1) = | false | null | true | Math.lemma_mult_le_right b a (k - 1);
Math.lemma_mult_le_left (k - 1) b (k - 1) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.pos",
"FStar.Math.Lemmas.lemma_mult_le_left",
"Prims.op_Subtraction",
"Prims.unit",
"FStar.Math.Lemmas.lemma_mult_le_right",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.squash",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
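(* carry_bv mirrors carry_uint64 in the bitvector theory: the same
   expression over the int2bv images. carry_uint64_ok below relates the two
   via arith_to_bv_tac, letting the borrow computation be discharged by the
   SMT solver's bitvector engine instead of nonlinear integer reasoning. *)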
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A helper used below. The native encoding of bitvectors seems to make
   these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable as a calc proof, but it can still fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
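(* mask_hi_gte is all-ones exactly when a.high > b.high (gte but not eq),
   and mask_lo_gte exactly when a.high = b.high and a.low >= b.low; their
   union covers precisely the v a >= v b case, which is what the two
   characterization lemmas above let the solver conclude. *)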
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
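(* Example (illustrative): applied to 0x100000003, the logand with
   u64_l32_mask drops the high word and returns 3 = 0x100000003 % pow2 32. *)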
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
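(* Worked example (illustrative): with hi = 1 and lo = 5 the result is
   0x100000005, i.e. 1 % pow2 32 * pow2 32 + 5; the precondition
   U64.v lo < pow2 32 keeps the addition from carrying into the high half. *)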
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val product_bound (a b: nat) (k: pos)
: Lemma (requires (a < k /\ b < k)) (ensures a * b <= k * k - 2 * k + 1) | [] | FStar.UInt128.product_bound | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.nat -> b: Prims.nat -> k: Prims.pos
-> FStar.Pervasives.Lemma (requires a < k /\ b < k) (ensures a * b <= k * k - 2 * k + 1) | {
"end_col": 39,
"end_line": 927,
"start_col": 2,
"start_line": 926
} |
Prims.Pure | val add: a:t -> b:t -> Pure t
(requires (size (v a + v b) n))
(ensures (fun c -> v a + v b = v c)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); } | val add: a:t -> b:t -> Pure t
(requires (size (v a + v b) n))
(ensures (fun c -> v a + v b = v c))
let add (a b: t) : Pure t (requires (v a + v b < pow2 128)) (ensures (fun r -> v a + v b = v r)) = | false | null | false | let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l; high = U64.add (U64.add a.high b.high) (carry l b.low) } | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt128.t",
"FStar.UInt128.Mkuint128",
"FStar.UInt64.add",
"FStar.UInt128.__proj__Mkuint128__item__high",
"FStar.UInt128.carry",
"FStar.UInt128.__proj__Mkuint128__item__low",
"Prims.unit",
"FStar.UInt128.carry_sum_ok",
"FStar.UInt64.t",
"FStar.UInt64.add_mod",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Addition",
"FStar.UInt128.v",
"Prims.pow2",
"Prims.op_Equality",
"Prims.int"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
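(* Branch-free borrow: the top bit of a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b))
   is set exactly when a < b. Spot check (illustrative): a = 0, b = 1 gives
   a -%^ b = pow2 64 - 1, the inner |^ is all-ones, and the final shift by
   63 returns 1. *)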
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
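(* uint_to_t splits x into base-pow2 64 digits; e.g. (illustrative)
   x = pow2 64 + 7 yields low = 7 and high = 1, and the div_mod call
   justifies that the two digits reconstruct x. *)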
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val add: a:t -> b:t -> Pure t
(requires (size (v a + v b) n))
(ensures (fun c -> v a + v b = v c)) | [] | FStar.UInt128.add | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> b: FStar.UInt128.t -> Prims.Pure FStar.UInt128.t | {
"end_col": 61,
"end_line": 187,
"start_col": 40,
"start_line": 183
} |
Prims.Pure | val eq (a:t) (b:t) : Pure bool
(requires True)
(ensures (fun r -> r == eq #n (v a) (v b))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high | val eq (a:t) (b:t) : Pure bool
(requires True)
(ensures (fun r -> r == eq #n (v a) (v b)))
let eq (a b: t) = | false | null | false | U64.eq a.low b.low && U64.eq a.high b.high | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt128.t",
"Prims.op_AmpAmp",
"FStar.UInt64.eq",
"FStar.UInt128.__proj__Mkuint128__item__low",
"FStar.UInt128.__proj__Mkuint128__item__high",
"Prims.bool"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
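(* lem_ult stitches the pieces together: int2bv_ult moves the comparison to
   bitvectors, and lem_ult_1 / lem_ult_2 then pin carry_bv down to int2bv 1
   or int2bv 0 on the respective branches. *)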
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so we add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
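(* Spot check (illustrative): for v a = v b = pow2 127 both low words are 0,
   the carry is 0, and the high word is (pow2 63 + pow2 63) % pow2 64 = 0,
   so v r = 0 = (v a + v b) % pow2 128. *)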
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
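(* Spot check (illustrative): sub_mod of 0 and 1 wraps to all-ones: the low
   word becomes pow2 64 - 1, the borrow carry is 1, and the high word also
   becomes pow2 64 - 1, i.e. v r = pow2 128 - 1 = (0 - 1) % pow2 128. *)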
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
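(* append_uint places num2's bits above num1's; e.g. (illustrative) with
   n1 = n2 = 2, append_uint 1 2 = 1 + 2 * pow2 2 = 9, a valid uint_t 4. *)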
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
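(* Worked example (illustrative): hi = 1, lo = pow2 63 and s = 2 give
   (1 * pow2 2) % pow2 64 + pow2 63 / pow2 62 = 4 + 2 = 6, the new high
   word of the 128-bit pair shifted left by two. *)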
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
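(* Usage sketch (illustrative): shift_left (uint_to_t 1) (U32.uint_to_t 127)
   takes the large branch and returns uint_to_t (pow2 127); the bound
   U32.v s < 128 comes from this function's declaration in the interface. *)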
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val eq (a:t) (b:t) : Pure bool
(requires True)
(ensures (fun r -> r == eq #n (v a) (v b))) | [] | FStar.UInt128.eq | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> b: FStar.UInt128.t -> Prims.Pure Prims.bool | {
"end_col": 59,
"end_line": 807,
"start_col": 17,
"start_line": 807
} |
Prims.Pure | val logxor: a:t -> b:t -> Pure t
(requires True)
(ensures (fun r -> v r == logxor (v a) (v b))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r | val logxor: a:t -> b:t -> Pure t
(requires True)
(ensures (fun r -> v r == logxor (v a) (v b)))
let logxor (a b: t) : Pure t (requires True) (ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) = | false | null | false | let r = { low = U64.logxor a.low b.low; high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high) (vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt128.t",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"FStar.BitVector.bv_t",
"FStar.UInt128.vec128",
"FStar.BitVector.logxor_vec",
"FStar.UInt128.to_vec_v",
"FStar.UInt128.logxor_vec_append",
"FStar.UInt128.vec64",
"FStar.UInt128.__proj__Mkuint128__item__high",
"FStar.UInt128.__proj__Mkuint128__item__low",
"FStar.Seq.Base.seq",
"Prims.bool",
"FStar.Seq.Base.append",
"FStar.UInt128.uint128",
"FStar.UInt128.Mkuint128",
"FStar.UInt64.logxor",
"Prims.l_True",
"Prims.b2t",
"Prims.op_Equality",
"FStar.UInt.uint_t",
"FStar.UInt128.n",
"FStar.UInt128.v",
"FStar.UInt.logxor"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
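(* v reads the pair as a two-digit base-pow2 64 number; e.g. (illustrative)
   { low = 2; high = 1 } denotes pow2 64 + 2. *)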
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so we add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
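// Illustrative sanity check (not part of the proof): for a = { low = 1; high = 0 }
// and b = { low = 2; high = 0 }, the low subtraction wraps (carry = 1) and
// v (sub_mod_impl a b) = 2^128 - 1 = v a - v b + pow2 128, as promised by
// sub_mod_wrap1_ok.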
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
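// Arithmetic reading of shift_bound: num <= pow2 n - 1, hence
// num * pow2 n' <= (pow2 n - 1) * pow2 n' = pow2 (n'+n) - pow2 n'.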
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
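// Example (illustrative): with n1 = n2 = 4, num1 = 0x3 and num2 = 0xA,
// append_uint num1 num2 = 0x3 + 0xA * 16 = 0xA3, i.e. num2 becomes the high
// bits and num1 the low bits of the 8-bit result.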
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
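// The same limb-wise pattern justifies the other bitwise operations below:
// compute on each 64-bit half, then relate the 128-bit bit vector to the
// appended halves via the corresponding *_vec_append lemma.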
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logxor: a:t -> b:t -> Pure t
(requires True)
(ensures (fun r -> v r == logxor (v a) (v b))) | [] | FStar.UInt128.logxor | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> b: FStar.UInt128.t -> Prims.Pure FStar.UInt128.t | {
"end_col": 3,
"end_line": 435,
"start_col": 59,
"start_line": 425
} |
FStar.Pervasives.Lemma | val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1) | val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 = | false | null | true | UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.b2t",
"Prims.op_GreaterThan",
"FStar.UInt.uint_t",
"FStar.UInt.append_lemma",
"FStar.UInt.to_vec",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed. It leads to failures in
   HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
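// Sanity check (illustrative, not part of the proof): for a = 0 and b = 1,
// a -%^ b is all ones, so the inner disjunction has its top bit set and the
// shift by 63 yields carry 1; swapping a and b yields carry 0.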
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
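// For intuition: v { low; high } = low + high * pow2 64, so uint_to_t splits
// x into its remainder and quotient by pow2 64.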
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... it seems the native encoding of
   bitvectors may be making these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1)) | [] | FStar.UInt128.to_vec_append | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | num1: FStar.UInt.uint_t n1 -> num2: FStar.UInt.uint_t n2
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt.to_vec (FStar.UInt128.append_uint num1 num2) ==
FStar.Seq.Base.append (FStar.UInt.to_vec num2) (FStar.UInt.to_vec num1)) | {
"end_col": 57,
"end_line": 386,
"start_col": 2,
"start_line": 386
} |
FStar.Pervasives.Lemma | val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s)) | val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s = | false | null | true | Math.pow2_plus (64 - s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64 - s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s)) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt128.t",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Math.Lemmas.division_addition_lemma",
"FStar.UInt64.v",
"FStar.UInt128.__proj__Mkuint128__item__low",
"Prims.pow2",
"FStar.Mul.op_Star",
"FStar.UInt128.__proj__Mkuint128__item__high",
"Prims.op_Subtraction",
"Prims.unit",
"FStar.Math.Lemmas.paren_mul_right",
"FStar.Math.Lemmas.pow2_plus"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed. It leads to failures in
   HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... it seems the native encoding of
   bitvectors may be making these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
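// Arithmetic reading: n * pow2 k2 = (n * pow2 (k2 - k1)) * pow2 k1 is a
// multiple of pow2 k1, so its residue modulo pow2 k1 is 0.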
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
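// Illustrative reading of add_u64_shift_left: for s = 8 it keeps the low
// 56 bits of hi shifted up by one byte and brings the top byte of lo into
// the low byte, exactly the high limb of a 128-bit left shift by 8.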
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
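// Dispatch: shifts below 64 keep both limbs live (shift_left_small); shifts
// in [64, 128) zero the low limb and move the low limb's surviving bits into
// the high limb (shift_left_large).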
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
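// Reading: shifting the high limb right by s splits its contribution
// a_h * pow2 (64-s) into a new high part (a_h / pow2 s) and the bits that
// spill into the low limb ((a_h * pow2 64 / pow2 s) % pow2 64).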
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s)) | [] | FStar.UInt128.u128_div_pow2 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: Prims.nat{s < 64}
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt128.v a / Prims.pow2 s ==
FStar.UInt64.v (Mkuint128?.low a) / Prims.pow2 s +
FStar.UInt64.v (Mkuint128?.high a) * Prims.pow2 (64 - s)) | {
"end_col": 84,
"end_line": 771,
"start_col": 2,
"start_line": 769
} |
FStar.Pervasives.Lemma | val u64_pow2_bound (a: UInt.uint_t 64) (s: nat) : Lemma (a * pow2 s < pow2 (64 + s)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64) | val u64_pow2_bound (a: UInt.uint_t 64) (s: nat) : Lemma (a * pow2 s < pow2 (64 + s))
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) : Lemma (a * pow2 s < pow2 (64 + s)) = | false | null | true | Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt.uint_t",
"Prims.nat",
"FStar.Math.Lemmas.lemma_mult_le_right",
"Prims.pow2",
"Prims.unit",
"FStar.Math.Lemmas.pow2_plus",
"Prims.l_True",
"Prims.squash",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed. It leads to failures in
   HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
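(* Round-trip illustration (example values only): v { low = 5; high = 1 } is
   5 + 1 * pow2 64, and uint_to_t (pow2 64 + 5) recovers low = 5, high = 1,
   since (pow2 64 + 5) % pow2 64 = 5 and (pow2 64 + 5) / pow2 64 = 1. *)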
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
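(* Worked example of the carry propagation: for a = { low = pow2 64 - 1;
   high = 1 } and b = { low = 1; high = 0 }, the low sum wraps to l = 0,
   so carry l b.low = 1 and the result is { low = 0; high = 2 }, i.e.
   2 * pow2 64 = (pow2 65 - 1) + 1 = v a + v b as required. *)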
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
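(* Numeric instance: n1 = 13, n2 = 24, k = 10. The residues 3 + 4 = 7 stay
   below k, and indeed (13 + 24) % 10 = 7, so no extra reduction happens. *)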
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
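(* Example: append_uint #4 #4 3 5 = 3 + 5 * 16 = 83 = 0x53; num2 becomes the
   high nibble and num1 the low nibble of the combined 8-bit value. *)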
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
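(* Instance: n = 5, k1 = 4, k2 = 7 gives 5 * 128 = 640 = 40 * 16, so
   640 % pow2 4 = 0 -- shifting by k2 >= k1 clears the low k1 bits. *)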
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
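(* Instance: a = 13, n1 = 5, n2 = 2. Both sides are 1: 13 / pow2 3 = 1 and
   13 * pow2 2 / pow2 5 = 52 / 32 = 1. *)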
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
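(* Small-width analogue (8-bit limbs, for intuition only): hi = 0x01,
   lo = 0x80, s = 1 gives (1 * 2) % 256 + 128 / 128 = 3 -- the top bit of
   lo moves into the freshly shifted high limb. *)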
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
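(* Half-width analogue of the statement, with 64 replaced by 4 and 128 by 8:
   for n = 19, 19 % 16 * 16 = 48 and 19 * 16 % 256 = 304 % 256 = 48. *)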
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
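(* Instance: n = 3, m = 14, k = 10. Since 3 + 14 % 10 = 7 < 10, adding n
   cannot trigger another reduction, and (3 + 14) % 10 = 7 as well. *)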
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) : | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_pow2_bound (a: UInt.uint_t 64) (s: nat) : Lemma (a * pow2 s < pow2 (64 + s)) | [] | FStar.UInt128.u64_pow2_bound | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t 64 -> s: Prims.nat
-> FStar.Pervasives.Lemma (ensures a * Prims.pow2 s < Prims.pow2 (64 + s)) | {
"end_col": 47,
"end_line": 651,
"start_col": 2,
"start_line": 650
} |
FStar.Pervasives.Lemma | val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2))) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2 | val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 = | false | null | true | mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2 | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.pos",
"FStar.UInt128.div_add_small",
"Prims.unit",
"FStar.UInt128.mod_spec",
"Prims.op_Addition",
"FStar.Mul.op_Star"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
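(* Instance: n = 17, m1 = 2, m2 = 3. 17 / 6 = 2 and (17 / 2) / 3 = 8 / 3 = 2,
   so dividing by a product equals dividing by the factors in sequence. *)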
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
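(* Instance: n = 7, k1 = 4, k2 = 3. (7 % 3) * 4 = 4 and (7 * 4) % 12 =
   28 % 12 = 4 -- scaling by k1 scales both the modulus and the residue. *)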
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
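(* Worked example of the wrapping case: a = 0, b = 1 (as 128-bit values).
   The low limb wraps to pow2 64 - 1 with carry 1, and the high limb becomes
   (0 - 0) - 1 mod pow2 64 = pow2 64 - 1, so the result is pow2 128 - 1,
   i.e. v a - v b + pow2 128 as the lemma states. *)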
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
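(* Tightness check at n = 4, n' = 3: the largest num is 15, and
   15 * 8 = 120 = pow2 7 - pow2 3, so the stated bound is attained. *)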
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
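(* Example: append_uint #4 #4 3 5 = 83 = 0b01010011, and UInt.to_vec is
   most-significant-bit first, so the bits of num2 (0101) precede those of
   num1 (0011), matching the Seq.append order in the statement. *)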
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
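(* Half-width intuition: with 8-bit limbs and s >= 8, the high limb in
   (lo + hi * pow2 8) * pow2 s contributes hi * pow2 (8+s), a multiple of
   pow2 16, so only lo survives the reduction mod pow2 16. *)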
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2))) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2))) | [] | FStar.UInt128.add_mod_small | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n: Prims.nat -> m: Prims.nat -> k1: Prims.pos -> k2: Prims.pos
-> FStar.Pervasives.Lemma (requires n < k1)
(ensures n + k1 * m % (k1 * k2) == (n + k1 * m) % (k1 * k2)) | {
"end_col": 25,
"end_line": 585,
"start_col": 2,
"start_line": 583
} |
FStar.Pervasives.Lemma | val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2) | val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 = | false | null | true | Math.pow2_plus n2 (n1 - n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1 - n2));
mul_div_cancel a (pow2 n2) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.UInt128.mul_div_cancel",
"Prims.pow2",
"Prims.unit",
"FStar.UInt128.div_product",
"FStar.Mul.op_Star",
"Prims.op_Subtraction",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Prims.op_Division",
"FStar.Math.Lemmas.pow2_plus"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
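(* Instance: a = 23, k = 5, k' = 4. 23 % 5 = 3 is already below 5, hence
   below 20, so the outer reduction mod k' * k is a no-op: 3 % 20 = 3. *)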
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
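(* Instance: n = 23, k = 5 decomposes as 23 = 4 * 5 + 3, i.e. quotient
   times divisor plus remainder, which is the rewriting used below. *)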
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
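(* Small-width analogue: at width 4, 2 - 5 wraps to 2 - 5 + 16 = 13,
   exactly the "+ pow2 n" correction the lemma states for n = 64. *)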
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
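(* Added note: append_uint places num2 in the high bits above num1, giving
   num1 + num2 * 2^n1, and shift_bound keeps the sum below 2^(n1+n2).
   A concrete 4-bit instance, purely illustrative: appending nibble 2 above
   nibble 1 yields 0x21. *)
let _ = assert_norm (1 + 2 * pow2 4 = 0x21)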
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
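(* Added note: UInt.to_vec is most-significant-bit first, so the 128-bit
   vector of `a` is the high limb's bits followed by the low limb's bits.
   This orientation is what the *_vec_append lemmas below rely on. *)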
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
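(* Added note: logand, logxor, logor and lognot all follow the same recipe:
   compute limbwise with the U64 operation, then transport the 64-bit
   bitwise spec to 128 bits by viewing both sides as bit vectors (to_vec_v)
   and appealing to the matching *_vec_append lemma. *)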
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
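(* Added note: factoring 2^k2 = 2^(k2-k1) * 2^k1 makes n * 2^k2 an exact
   multiple of 2^k1, hence the zero remainder. A small concrete instance,
   purely illustrative (k2 = 5, k1 = 3): *)
let _ = assert_norm ((3 * pow2 5) % pow2 3 = 0)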
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
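(* Added note: for shifts of 64 or more, the high limb's contribution
   a_h * 2^64 * 2^s sits at bit positions >= 128 and vanishes modulo 2^128,
   so only the low limb matters; this is the n1 = n2 = 64 instance of
   shift_left_large_lemma. *)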
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1)) | [] | FStar.UInt128.div_pow2_diff | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.nat -> n1: Prims.nat -> n2: Prims.nat{n2 <= n1}
-> FStar.Pervasives.Lemma (ensures a / Prims.pow2 (n1 - n2) == a * Prims.pow2 n2 / Prims.pow2 n1) | {
"end_col": 28,
"end_line": 528,
"start_col": 2,
"start_line": 525
} |
FStar.Pervasives.Lemma | val shift_t_mod_val' (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 == U64.v a.low * pow2 s + U64.v a.high * pow2 (64 + s) % pow2 128) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
() | val shift_t_mod_val' (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 == U64.v a.low * pow2 s + U64.v a.high * pow2 (64 + s) % pow2 128)
let shift_t_mod_val' (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 == U64.v a.low * pow2 s + U64.v a.high * pow2 (64 + s) % pow2 128) = | false | null | true | let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64 + s) 128;
add_lt_le (a_l * pow2 s) (pow2 (64 + s)) (a_h * pow2 (64 + s) % pow2 128) (pow2 128 - pow2 (64 + s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64 + s)) (pow2 128);
shift_t_val a s;
() | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt128.t",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.unit",
"FStar.UInt128.shift_t_val",
"FStar.UInt128.add_mod_small'",
"FStar.Mul.op_Star",
"Prims.pow2",
"Prims.op_Addition",
"FStar.UInt128.add_lt_le",
"Prims.op_Modulus",
"Prims.op_Subtraction",
"FStar.UInt128.mul_mod_bound",
"FStar.UInt128.u64_pow2_bound",
"FStar.UInt.uint_t",
"FStar.UInt64.v",
"FStar.UInt128.__proj__Mkuint128__item__high",
"FStar.UInt128.__proj__Mkuint128__item__low",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"Prims.int",
"FStar.UInt128.v",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
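(* Added note: this is a branchless carry. The expression extracts bit 63 of
   a ^ ((a ^ b) | ((a - b) ^ b)), and lem_ult below shows that bit is 1
   exactly when a < b; constant_time_carry further down restates the same
   computation over machine integers. *)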
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
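(* Added note: the limbs are the digits of x in base 2^64, low = x % 2^64
   and high = x / 2^64, and div_mod is exactly the reconstruction identity
   needed for v (uint_to_t x) = x. *)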
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) : | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_t_mod_val' (a: t) (s: nat{s < 64})
: Lemma
((v a * pow2 s) % pow2 128 == U64.v a.low * pow2 s + U64.v a.high * pow2 (64 + s) % pow2 128) | [] | FStar.UInt128.shift_t_mod_val' | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: Prims.nat{s < 64}
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt128.v a * Prims.pow2 s % Prims.pow2 128 ==
FStar.UInt64.v (Mkuint128?.low a) * Prims.pow2 s +
FStar.UInt64.v (Mkuint128?.high a) * Prims.pow2 (64 + s) % Prims.pow2 128) | {
"end_col": 4,
"end_line": 664,
"start_col": 71,
"start_line": 655
} |
FStar.Pervasives.Lemma | val mod_spec_multiply : n:nat -> k:pos ->
Lemma ((n - n%k) / k * k == n - n%k) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_spec_multiply n k =
Math.lemma_mod_spec2 n k | val mod_spec_multiply : n:nat -> k:pos ->
Lemma ((n - n%k) / k * k == n - n%k)
let mod_spec_multiply n k = | false | null | true | Math.lemma_mod_spec2 n k | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.nat",
"Prims.pos",
"FStar.Math.Lemmas.lemma_mod_spec2",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
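(* Concrete instance of mul_pow2_diff (illustrative only): with a = 3,
   n1 = 5, n2 = 3, both sides equal 12: 3 * 2^2 = 12 = (3 * 2^5) / 2^3. *)
let _ = assert_norm (3 * pow2 2 = 3 * pow2 5 / pow2 3)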
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
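(* The five comparisons above are lexicographic on the (high, low) limbs:
   the high limbs decide unless they are equal, in which case the low limbs
   do. This agrees with v a = U64.v a.low + U64.v a.high * pow2 64, since
   the high limb carries the larger weight. (Expository note; not in the
   original file.) *)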
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
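(* The mask above is (high strictly greater) OR (high equal AND low gte),
   i.e. exactly the lexicographic gte on the (high, low) limbs that the two
   characterization lemmas record: mask_hi_gte is all ones iff
   a.high > b.high, and mask_lo_gte is all ones iff a.high = b.high and
   a.low >= b.low. (Expository note; not in the original file.) *)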
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
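(* Worked instance of u64_32_digits (illustrative only; the constant is
   hypothetical): for a = 0x100000002 the high 32-bit digit is 1 and the
   low digit is 2, and 1 * pow2 32 + 2 recombines to a. Checked by
   normalization: *)
let _ = assert_norm (0x100000002 / pow2 32 * pow2 32 + 0x100000002 % pow2 32 = 0x100000002)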
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// x1y' is an intermediate product; it is not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
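(* l32/h32 split a 64-bit value into its 32-bit halves; a concrete check
   (illustrative constant, not in the original file): *)
let _ = assert_norm (0xdeadbeef12345678 % pow2 32 = 0x12345678)
let _ = assert_norm (0xdeadbeef12345678 / pow2 32 = 0xdeadbeef)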
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
let mul_wide_low (x y: U64.t) =
  (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y
let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32
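(* mul_wide_low/mul_wide_high implement 64x64 -> 128-bit schoolbook
   multiplication on 32-bit digits: writing x = xh * 2^32 + xl and
   y = yh * 2^32 + yl,
     x * y = xh*yh * 2^64 + (xl*yh + xh*yl) * 2^32 + xl*yl,
   where pll, plh, phl, phh are the four partial products. The definitions
   above additionally account for the carries out of the low 64 bits that
   flow into the high word. (Expository note; not in the original file.) *)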
inline_for_extraction noextract
let mul_wide_impl_t' (x y: U64.t) : Pure (tuple4 U64.t U64.t U64.t U64.t)
(requires True)
(ensures (fun r -> let (u1, w3, x', t') = r in
U64.v u1 == U64.v x % pow2 32 /\
U64.v w3 == pll_l x y /\
U64.v x' == h32 (U64.v x) /\
U64.v t' == phl x y + pll_h x y)) =
let u1 = u64_mod_32 x in
let v1 = u64_mod_32 y in
u32_product_bound (U64.v u1) (U64.v v1);
let t = U64.mul u1 v1 in
assert (U64.v t == pll x y);
let w3 = u64_mod_32 t in
assert (U64.v w3 == pll_l x y);
let k = U64.shift_right t u32_32 in
assert (U64.v k == pll_h x y);
let x' = U64.shift_right x u32_32 in
assert (U64.v x' == h32 (U64.v x));
u32_product_bound (U64.v x') (U64.v v1);
let t' = U64.add (U64.mul x' v1) k in
(u1, w3, x', t')
// similar to u32_combine, but the high word is shifted first and then reduced:
// hi * 2^32 % 2^64 (rather than hi % 2^32 * 2^32)
let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
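(* Concrete instance of u32_combine' (illustrative only): hi = 1, lo = 5
   gives 1 * 2^32 % 2^64 + 5 = 0x100000005. *)
let _ = assert_norm (1 * pow2 32 % pow2 64 + 5 = 0x100000005)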
inline_for_extraction noextract
let mul_wide_impl (x: U64.t) (y: U64.t) :
Tot (r:t{U64.v r.low == mul_wide_low x y /\
U64.v r.high == mul_wide_high x y % pow2 64}) =
let (u1, w3, x', t') = mul_wide_impl_t' x y in
let k' = u64_mod_32 t' in
let w1 = U64.shift_right t' u32_32 in
assert (U64.v w1 == (phl x y + pll_h x y) / pow2 32);
let y' = U64.shift_right y u32_32 in
assert (U64.v y' == h32 (U64.v y));
u32_product_bound (U64.v u1) (U64.v y');
let t'' = U64.add (U64.mul u1 y') k' in
assert (U64.v t'' == plh x y + (phl x y + pll_h x y) % pow2 32);
let k'' = U64.shift_right t'' u32_32 in
assert (U64.v k'' == (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32);
u32_product_bound (U64.v x') (U64.v y');
mod_mul_pow2 (U64.v t'') 32 64;
let r0 = u32_combine' t'' w3 in
// let r0 = U64.add (U64.shift_left t'' u32_32) w3 in
assert (U64.v r0 == (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y);
let xy_w1 = U64.add (U64.mul x' y') w1 in
assert (U64.v xy_w1 == phh x y + (phl x y + pll_h x y) / pow2 32);
let r1 = U64.add_mod xy_w1 k'' in
assert (U64.v r1 == (phh x y + (phl x y + pll_h x y) / pow2 32 + (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32) % pow2 64);
let r = { low = r0; high = r1; } in
r
let product_sums (a b c d:nat) :
Lemma ((a + b) * (c + d) == a * c + a * d + b * c + b * d) = ()
val u64_32_product (xl xh yl yh:UInt.uint_t 32) :
Lemma ((xl + xh * pow2 32) * (yl + yh * pow2 32) ==
xl * yl + (xl * yh) * pow2 32 + (xh * yl) * pow2 32 + (xh * yh) * pow2 64)
#push-options "--z3rlimit 25"
let u64_32_product xl xh yl yh =
  assert (xh >= 0); // flakiness: this assert and the next two help Z3 prove (xh * pow2 32) >= 0
  assert (pow2 32 >= 0);
  assert (xh*pow2 32 >= 0);
product_sums xl (xh*pow2 32) yl (yh*pow2 32);
mul_abc_to_acb xh (pow2 32) yl;
assert (xl * (yh * pow2 32) == (xl * yh) * pow2 32);
Math.pow2_plus 32 32;
assert ((xh * pow2 32) * (yh * pow2 32) == (xh * yh) * pow2 64)
#pop-options
let product_expand (x y: U64.t) :
Lemma (U64.v x * U64.v y == phh x y * pow2 64 +
(plh x y + phl x y + pll_h x y) * pow2 32 +
pll_l x y) =
assert (U64.v x == l32 (U64.v x) + h32 (U64.v x) * pow2 32);
assert (U64.v y == l32 (U64.v y) + h32 (U64.v y) * pow2 32);
u64_32_product (l32 (U64.v x)) (h32 (U64.v x)) (l32 (U64.v y)) (h32 (U64.v y))
let product_low_expand (x y: U64.t) :
Lemma ((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) =
product_expand x y;
Math.lemma_mod_plus ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) (phh x y) (pow2 64)
let add_mod_then_mod (n m:nat) (k:pos) :
Lemma ((n + m % k) % k == (n + m) % k) =
mod_add n m k;
mod_add n (m % k) k;
mod_double m k
let shift_add (n:nat) (m:nat{m < pow2 32}) :
Lemma (n * pow2 32 % pow2 64 + m == (n * pow2 32 + m) % pow2 64) =
add_mod_small' m (n*pow2 32) (pow2 64)
let mul_wide_low_ok (x y: U64.t) :
Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) =
Math.pow2_plus 32 32;
mod_mul (plh x y + (phl x y + pll_h x y) % pow2 32) (pow2 32) (pow2 32);
assert (mul_wide_low x y ==
(plh x y + (phl x y + pll_h x y) % pow2 32) % pow2 32 * pow2 32 + pll_l x y);
add_mod_then_mod (plh x y) (phl x y + pll_h x y) (pow2 32);
assert (mul_wide_low x y == (plh x y + phl x y + pll_h x y) % pow2 32 * pow2 32 + pll_l x y);
mod_mul (plh x y + phl x y + pll_h x y) (pow2 32) (pow2 32);
shift_add (plh x y + phl x y + pll_h x y) (pll_l x y);
assert (mul_wide_low x y == ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64);
product_low_expand x y
val product_high32 : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 32 == phh x y * pow2 32 + plh x y + phl x y + pll_h x y)
#push-options "--z3rlimit 20"
let product_high32 x y =
Math.pow2_plus 32 32;
product_expand x y;
Math.division_addition_lemma (plh x y + phl x y + pll_h x y) (pow2 32) (phh x y * pow2 32);
mul_div_cancel (phh x y * pow2 32) (pow2 32);
mul_div_cancel (plh x y + phl x y + pll_h x y) (pow2 32);
Math.small_division_lemma_1 (pll_l x y) (pow2 32)
#pop-options
val product_high_expand : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 64 == phh x y + (plh x y + phl x y + pll_h x y) / pow2 32)
#push-options "--z3rlimit 15 --retry 5" // sporadically fails
let product_high_expand x y =
Math.pow2_plus 32 32;
div_product (mul_wide_high x y) (pow2 32) (pow2 32);
product_high32 x y;
Math.division_addition_lemma (plh x y + phl x y + pll_h x y) (pow2 32) (phh x y);
()
#pop-options
val mod_spec_multiply : n:nat -> k:pos ->
Lemma ((n - n%k) / k * k == n - n%k) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_spec_multiply : n:nat -> k:pos ->
Lemma ((n - n%k) / k * k == n - n%k) | [] | FStar.UInt128.mod_spec_multiply | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n: Prims.nat -> k: Prims.pos -> FStar.Pervasives.Lemma (ensures ((n - n % k) / k) * k == n - n % k) | {
"end_col": 26,
"end_line": 1135,
"start_col": 2,
"start_line": 1135
} |
Prims.Pure | val u32_combine' (hi lo: U64.t)
: Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32) | val u32_combine' (hi lo: U64.t)
: Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo))
let u32_combine' (hi lo: U64.t)
: Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) = | false | null | false | U64.add lo (U64.shift_left hi u32_32) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt64.t",
"FStar.UInt64.add",
"FStar.UInt64.shift_left",
"FStar.UInt128.u32_32",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.UInt64.v",
"Prims.pow2",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Addition",
"Prims.op_Modulus",
"FStar.Mul.op_Star"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
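(* Worked instance of the branchless carry trick (illustrative only, on
   4-bit words with a shift by 3 instead of 63): for a = 0b0010, b = 0b0101,
   a -%^ b = 0b1101 and a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) = 0b1101, whose
   top bit is 1, so the shift yields 1 (a < b). For a = 0b0101, b = 0b0010
   the same expression is 0b0010, whose top bit is 0, so the shift yields 0
   (a >= b). *)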
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
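(* Worked instance of the carry propagation (illustrative only): for
   a = { high = 0; low = 2^64 - 1 } and b = { high = 0; low = 1 }, the low
   limbs wrap to l = 0, carry l b.low = 1, and the result is
   { high = 1; low = 0 }, i.e. v = 2^64 as expected. *)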
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
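(* Concrete instance of shift_past_mod (illustrative only): with n = 7,
   k1 = 3, k2 = 5, 7 * 2^5 % 2^3 = 224 % 8 = 0. *)
let _ = assert_norm (7 * pow2 5 % pow2 3 = 0)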
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// x1y' is an intermediate product; it is not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
let mul_wide_low (x y: U64.t) =
  (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y
let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32
inline_for_extraction noextract
let mul_wide_impl_t' (x y: U64.t) : Pure (tuple4 U64.t U64.t U64.t U64.t)
(requires True)
(ensures (fun r -> let (u1, w3, x', t') = r in
U64.v u1 == U64.v x % pow2 32 /\
U64.v w3 == pll_l x y /\
U64.v x' == h32 (U64.v x) /\
U64.v t' == phl x y + pll_h x y)) =
let u1 = u64_mod_32 x in
let v1 = u64_mod_32 y in
u32_product_bound (U64.v u1) (U64.v v1);
let t = U64.mul u1 v1 in
assert (U64.v t == pll x y);
let w3 = u64_mod_32 t in
assert (U64.v w3 == pll_l x y);
let k = U64.shift_right t u32_32 in
assert (U64.v k == pll_h x y);
let x' = U64.shift_right x u32_32 in
assert (U64.v x' == h32 (U64.v x));
u32_product_bound (U64.v x') (U64.v v1);
let t' = U64.add (U64.mul x' v1) k in
(u1, w3, x', t')
// similar to u32_combine, but the high word is shifted first and then reduced:
// hi * 2^32 % 2^64 (rather than hi % 2^32 * 2^32)
let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u32_combine' (hi lo: U64.t)
: Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) | [] | FStar.UInt128.u32_combine' | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | hi: FStar.UInt64.t -> lo: FStar.UInt64.t -> Prims.Pure FStar.UInt64.t | {
"end_col": 39,
"end_line": 1024,
"start_col": 2,
"start_line": 1024
} |
FStar.Pervasives.Lemma | val product_low_expand (x y: U64.t)
: Lemma
((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let product_low_expand (x y: U64.t) :
Lemma ((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) =
product_expand x y;
Math.lemma_mod_plus ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) (phh x y) (pow2 64) | val product_low_expand (x y: U64.t)
: Lemma
((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64)
let product_low_expand (x y: U64.t)
: Lemma
((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) = | false | null | true | product_expand x y;
Math.lemma_mod_plus ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) (phh x y) (pow2 64) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.Math.Lemmas.lemma_mod_plus",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"FStar.UInt128.plh",
"FStar.UInt128.phl",
"FStar.UInt128.pll_h",
"Prims.pow2",
"FStar.UInt128.pll_l",
"FStar.UInt128.phh",
"Prims.unit",
"FStar.UInt128.product_expand",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"Prims.int",
"Prims.op_Modulus",
"FStar.UInt64.v",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
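// Added trace (illustration only, not from the original source): for
// a = 0, b = 1 the subtraction wraps, a -%^ b = 2^64 - 1, the value under
// the final shift is all ones, and bit 63 yields carry = 1. For a = 1,
// b = 0 the value under the shift is 0, so the extracted carry is 0.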
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
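// Added sanity checks (hypothetical, discharged by normalization): the
// low-limb addition wraps exactly when the mathematical sum reaches 2^64,
// in which case carry contributes 1 to the high limb.
let _ = assert_norm (((pow2 64 - 1) + 1) % pow2 64 == 0)
let _ = assert_norm (((pow2 64 - 1) + 1) / pow2 64 == 1)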
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
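// Added example (not in the original source): append_uint places num2 in
// the bits above num1, e.g. with 8-bit digits 0x34 + 0x12 * 2^8 = 0x1234.
let _append_uint_example = assert_norm (0x34 + 0x12 * pow2 8 == 0x1234)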
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
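// Note (added): logxor, logor and lognot below follow the same pattern as
// logand: compute limb-wise on low/high, then use to_vec_v and the
// corresponding *_vec_append lemma to show that concatenating the 64-bit
// bit vectors matches the 128-bit specification.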
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
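// Note (added): eq/gt/lt/gte/lte implement the lexicographic order on
// (high, low) pairs, which coincides with the numeric order on v a, v b
// because v a = U64.v a.low + U64.v a.high * pow2 64.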
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
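// Added example (hypothetical name, discharged by normalization): masking
// with 0xffffffff keeps exactly the low 32 bits, matching `% pow2 32`.
let _u64_mod_32_example = assert_norm (0x123456789a % pow2 32 == 0x3456789a)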
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
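// Added example (hypothetical name): combining hi = 1, lo = 5 gives
// 1 % 2^32 * 2^32 + 5 = 0x100000005, i.e. lo + (hi << 32).
let _u32_combine_example = assert_norm (1 % pow2 32 * pow2 32 + 5 == 0x100000005)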
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + (hi << 32)
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
let mul_wide_low (x y: U64.t) = (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y
let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32
inline_for_extraction noextract
let mul_wide_impl_t' (x y: U64.t) : Pure (tuple4 U64.t U64.t U64.t U64.t)
(requires True)
(ensures (fun r -> let (u1, w3, x', t') = r in
U64.v u1 == U64.v x % pow2 32 /\
U64.v w3 == pll_l x y /\
U64.v x' == h32 (U64.v x) /\
U64.v t' == phl x y + pll_h x y)) =
let u1 = u64_mod_32 x in
let v1 = u64_mod_32 y in
u32_product_bound (U64.v u1) (U64.v v1);
let t = U64.mul u1 v1 in
assert (U64.v t == pll x y);
let w3 = u64_mod_32 t in
assert (U64.v w3 == pll_l x y);
let k = U64.shift_right t u32_32 in
assert (U64.v k == pll_h x y);
let x' = U64.shift_right x u32_32 in
assert (U64.v x' == h32 (U64.v x));
u32_product_bound (U64.v x') (U64.v v1);
let t' = U64.add (U64.mul x' v1) k in
(u1, w3, x', t')
// similar to u32_combine, but computes hi * 2^32 % 2^64 + lo (the shift wraps mod 2^64)
let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
inline_for_extraction noextract
let mul_wide_impl (x: U64.t) (y: U64.t) :
Tot (r:t{U64.v r.low == mul_wide_low x y /\
U64.v r.high == mul_wide_high x y % pow2 64}) =
let (u1, w3, x', t') = mul_wide_impl_t' x y in
let k' = u64_mod_32 t' in
let w1 = U64.shift_right t' u32_32 in
assert (U64.v w1 == (phl x y + pll_h x y) / pow2 32);
let y' = U64.shift_right y u32_32 in
assert (U64.v y' == h32 (U64.v y));
u32_product_bound (U64.v u1) (U64.v y');
let t'' = U64.add (U64.mul u1 y') k' in
assert (U64.v t'' == plh x y + (phl x y + pll_h x y) % pow2 32);
let k'' = U64.shift_right t'' u32_32 in
assert (U64.v k'' == (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32);
u32_product_bound (U64.v x') (U64.v y');
mod_mul_pow2 (U64.v t'') 32 64;
let r0 = u32_combine' t'' w3 in
// let r0 = U64.add (U64.shift_left t'' u32_32) w3 in
assert (U64.v r0 == (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y);
let xy_w1 = U64.add (U64.mul x' y') w1 in
assert (U64.v xy_w1 == phh x y + (phl x y + pll_h x y) / pow2 32);
let r1 = U64.add_mod xy_w1 k'' in
assert (U64.v r1 == (phh x y + (phl x y + pll_h x y) / pow2 32 + (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32) % pow2 64);
let r = { low = r0; high = r1; } in
r
let product_sums (a b c d:nat) :
Lemma ((a + b) * (c + d) == a * c + a * d + b * c + b * d) = ()
val u64_32_product (xl xh yl yh:UInt.uint_t 32) :
Lemma ((xl + xh * pow2 32) * (yl + yh * pow2 32) ==
xl * yl + (xl * yh) * pow2 32 + (xh * yl) * pow2 32 + (xh * yh) * pow2 64)
#push-options "--z3rlimit 25"
let u64_32_product xl xh yl yh =
assert (xh >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (pow2 32 >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (xh*pow2 32 >= 0);
product_sums xl (xh*pow2 32) yl (yh*pow2 32);
mul_abc_to_acb xh (pow2 32) yl;
assert (xl * (yh * pow2 32) == (xl * yh) * pow2 32);
Math.pow2_plus 32 32;
assert ((xh * pow2 32) * (yh * pow2 32) == (xh * yh) * pow2 64)
#pop-options
let product_expand (x y: U64.t) :
Lemma (U64.v x * U64.v y == phh x y * pow2 64 +
(plh x y + phl x y + pll_h x y) * pow2 32 +
pll_l x y) =
assert (U64.v x == l32 (U64.v x) + h32 (U64.v x) * pow2 32);
assert (U64.v y == l32 (U64.v y) + h32 (U64.v y) * pow2 32);
u64_32_product (l32 (U64.v x)) (h32 (U64.v x)) (l32 (U64.v y)) (h32 (U64.v y))
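// Added concrete instance (not from the original source): with
// v x = 2^32 + 2 and v y = 2^32 + 3, product_expand specializes to
// (2^32 + 2) * (2^32 + 3) = 1 * 2^64 + (2 + 3 + 0) * 2^32 + 6,
// using phh = 1, plh = 2, phl = 3, pll_h = 0, pll_l = 6.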
let product_low_expand (x y: U64.t) :
Lemma ((U64.v x * U64.v y) % pow2 64 == | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val product_low_expand (x y: U64.t)
: Lemma
((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) | [] | FStar.UInt128.product_low_expand | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt64.t -> y: FStar.UInt64.t
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt64.v x * FStar.UInt64.v y % Prims.pow2 64 ==
((FStar.UInt128.plh x y + FStar.UInt128.phl x y + FStar.UInt128.pll_h x y) * Prims.pow2 32 +
FStar.UInt128.pll_l x y) %
Prims.pow2 64) | {
"end_col": 97,
"end_line": 1083,
"start_col": 2,
"start_line": 1082
} |
FStar.Pervasives.Lemma | val int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b | val int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b)) = | false | null | true | introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _. FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _. FStar.BV.int2bv_lemma_ult_2 a b | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"Prims.pos",
"FStar.UInt.uint_t",
"FStar.Classical.Sugar.implies_intro",
"Prims.b2t",
"FStar.BV.bvult",
"FStar.BV.int2bv",
"Prims.squash",
"Prims.op_LessThan",
"FStar.BV.int2bv_lemma_ult_2",
"Prims.unit",
"FStar.BV.int2bv_lemma_ult_1",
"Prims.l_True",
"Prims.l_iff",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b)) | [] | FStar.UInt128.int2bv_ult | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt.uint_t n -> b: FStar.UInt.uint_t n
-> FStar.Pervasives.Lemma
(ensures a < b <==> FStar.BV.bvult (FStar.BV.int2bv a) (FStar.BV.int2bv b)) | {
"end_col": 44,
"end_line": 88,
"start_col": 4,
"start_line": 85
} |
Prims.Tot | val uint128_to_uint64: a:t -> b:U64.t{U64.v b == v a % pow2 64} | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low | val uint128_to_uint64: a:t -> b:U64.t{U64.v b == v a % pow2 64}
let uint128_to_uint64 (a: t) : b: U64.t{U64.v b == v a % pow2 64} = | false | null | false | a.low | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt128.t",
"FStar.UInt128.__proj__Mkuint128__item__low",
"FStar.UInt64.t",
"Prims.eq2",
"Prims.int",
"FStar.UInt64.v",
"Prims.op_Modulus",
"FStar.UInt128.v",
"Prims.pow2"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
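(* Worked instance of the carry propagation: with a = { low = 2^64 - 1;
   high = 0 } and b = { low = 1; high = 0 }, l wraps to 0, carry l b.low
   is 1, and the result is { low = 0; high = 1 }, i.e. v = 2^64. *)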
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
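(* Numeric instance: n1 = 13, n2 = 24, k = 20 gives
   13 % 20 + 24 % 20 = 13 + 4 = 17 < 20, and (13 + 24) % 20 = 17. *)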
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
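(* Worked instance of the wrap-around in add_mod: a = b = uint_to_t (pow2 127),
   i.e. high = 2^63 and low = 0 on both sides. Then l = 0, the carry is 0,
   and high = (2^63 + 2^63) % 2^64 = 0, so v r = 0 = (2^127 + 2^127) % pow2 128. *)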
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
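(* Worked instance of a full wrap in sub_mod: a = uint_to_t 0, b = uint_to_t 1.
   Then l = 2^64 - 1, carry a.low l = 1, and high = (0 - 0 - 1) % 2^64
   = 2^64 - 1, so v (sub_mod a b) = 2^128 - 1 = (0 - 1) % pow2 128. *)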
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
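(* Informal example: append_uint #2 #3 3 5 = 3 + 5 * 4 = 23 < 2^5,
   i.e. num1 occupies the low n1 bits and num2 the high n2 bits,
   matching the Seq.append order in to_vec_append below. *)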
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
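(* Numeric instance: n = 5, k1 = 3, k2 = 5 gives
   (5 * 32) % 8 = 160 % 8 = 0. *)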
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
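(* Numeric instance: a = 9, n1 = 3, n2 = 1 gives
   9 / pow2 2 = 2 and 9 * 2 / pow2 3 = 18 / 8 = 2. *)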
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
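(* Worked instance of add_u64_shift_left: hi = 1, lo = 2^63, s = 1 gives
   (1 * 2) % 2^64 + 2^63 / 2^63 = 2 + 1 = 3: the shifted-out top bit of lo
   lands in bit 0 of the new high word. *)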
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
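(* Numeric instance: a = 3, b = 7, k = 10 gives (3 + 70) / 10 = 7. *)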
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
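(* Worked instance of the dispatch: shifting v a = 1 by s = 100 takes the
   large branch with h_shift = 36, so r.high = 2^36, r.low = 0, and
   v r = 2^36 * 2^64 = 2^100 = (1 * pow2 100) % pow2 128. *)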
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
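(* Worked instance: for v a = 2^100 (low = 0, high = 2^36) and s = 100,
   the large branch yields low = 2^36 / 2^36 = 1 and high = 0,
   i.e. v r = 1 = 2^100 / pow2 100. *)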
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
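(* These comparisons are lexicographic on (high, low): for example,
   lt { low = 5; high = 1 } { low = 0; high = 2 } holds because
   1 < 2 on the high words, irrespective of the low words. *)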
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
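(* Sketch of why this works: U64.eq_mask yields 2^64 - 1 on equal inputs
   and 0 otherwise, so the conjunction is all-ones exactly when both
   halves agree, i.e. when v a = v b. *)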
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
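(* Sketch of the mask logic: mask_hi_gte is all-ones exactly when
   a.high > b.high, and mask_lo_gte when the highs agree and
   a.low >= b.low; their union covers precisely the cases with
   v a >= v b. E.g. a = { low = 0; high = 1 }, b = { low = 5; high = 0 }
   sets mask_hi_gte, and the result is 2^128 - 1. *)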
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; } | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint128_to_uint64: a:t -> b:U64.t{U64.v b == v a % pow2 64} | [] | FStar.UInt128.uint128_to_uint64 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> b: FStar.UInt64.t{FStar.UInt64.v b == FStar.UInt128.v a % Prims.pow2 64} | {
"end_col": 71,
"end_line": 896,
"start_col": 66,
"start_line": 896
} |
Prims.Pure | val shift_left_small (a: t) (s: U32.t)
: Pure t (requires (U32.v s < 64)) (ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r | val shift_left_small (a: t) (s: U32.t)
: Pure t (requires (U32.v s < 64)) (ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128))
let shift_left_small (a: t) (s: U32.t)
: Pure t (requires (U32.v s < 64)) (ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) = | false | null | false | if U32.eq s 0ul
then a
else
let r = { low = U64.shift_left a.low s; high = add_u64_shift_left_respec a.high a.low s } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [] | [
"FStar.UInt128.t",
"FStar.UInt32.t",
"FStar.UInt32.eq",
"FStar.UInt32.__uint_to_t",
"Prims.bool",
"Prims.unit",
"FStar.UInt128.shift_t_mod_val",
"FStar.UInt128.mod_spec_rew_n",
"FStar.Mul.op_Star",
"Prims.pow2",
"FStar.UInt.uint_t",
"FStar.UInt64.v",
"FStar.UInt128.__proj__Mkuint128__item__high",
"FStar.UInt128.__proj__Mkuint128__item__low",
"FStar.UInt32.v",
"FStar.UInt128.uint128",
"FStar.UInt128.Mkuint128",
"FStar.UInt64.shift_left",
"FStar.UInt128.add_u64_shift_left_respec",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Equality",
"Prims.int",
"FStar.UInt128.v",
"Prims.op_Modulus"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64)) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_left_small (a: t) (s: U32.t)
: Pure t (requires (U32.v s < 64)) (ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) | [] | FStar.UInt128.shift_left_small | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: FStar.UInt32.t -> Prims.Pure FStar.UInt128.t | {
"end_col": 5,
"end_line": 689,
"start_col": 2,
"start_line": 680
} |
FStar.Pervasives.Lemma | val product_high32 : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 32 == phh x y * pow2 32 + plh x y + phl x y + pll_h x y) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let product_high32 x y =
Math.pow2_plus 32 32;
product_expand x y;
Math.division_addition_lemma (plh x y + phl x y + pll_h x y) (pow2 32) (phh x y * pow2 32);
mul_div_cancel (phh x y * pow2 32) (pow2 32);
mul_div_cancel (plh x y + phl x y + pll_h x y) (pow2 32);
Math.small_division_lemma_1 (pll_l x y) (pow2 32) | val product_high32 : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 32 == phh x y * pow2 32 + plh x y + phl x y + pll_h x y)
let product_high32 x y = | false | null | true | Math.pow2_plus 32 32;
product_expand x y;
Math.division_addition_lemma (plh x y + phl x y + pll_h x y) (pow2 32) (phh x y * pow2 32);
mul_div_cancel (phh x y * pow2 32) (pow2 32);
mul_div_cancel (plh x y + phl x y + pll_h x y) (pow2 32);
Math.small_division_lemma_1 (pll_l x y) (pow2 32) | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.Math.Lemmas.small_division_lemma_1",
"FStar.UInt128.pll_l",
"Prims.pow2",
"Prims.unit",
"FStar.UInt128.mul_div_cancel",
"Prims.op_Addition",
"FStar.UInt128.plh",
"FStar.UInt128.phl",
"FStar.UInt128.pll_h",
"FStar.Mul.op_Star",
"FStar.UInt128.phh",
"FStar.Math.Lemmas.division_addition_lemma",
"FStar.UInt128.product_expand",
"FStar.Math.Lemmas.pow2_plus"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
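(* Worked example of the carry expression, scaled down to 4-bit words for
readability (illustrative only; the proofs below do not depend on it).
The 4-bit analogue is a ^ ((a ^ b) | ((a - b) ^ b)) >> 3.
For a = 2 = 0b0010, b = 5 = 0b0101:
a - b wraps to 13 = 0b1101
a ^ b = 0b0111
(a - b) ^ b = 0b1000
or of the two = 0b1111
xor with a = 0b1101, top bit 1, so the carry is 1 (a < b).
For a = 5, b = 2 the same computation gives 0b0010, top bit 0. *)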
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
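(* Sanity check for the limb decomposition used by add (illustrative only):
v a + v b = (a_l + b_l) + (a_h + b_h) * pow2 64, and the low limb
l = (a_l + b_l) % pow2 64 satisfies l < b_l exactly when a_l + b_l
overflowed pow2 64, which is what carry l b.low detects. *)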
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
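(* Concrete instance (illustrative; assumed to discharge by normalization):
with n1 = 5, n2 = 10, k = 16 the side condition holds (5 + 10 < 16) and
both sides equal 15; with n1 = 9, n2 = 9 it fails (9 + 9 = 18 >= 16) and
indeed (9 + 9) % 16 = 2 <> 18. *)
let _ = assert_norm (5 % 16 + 10 % 16 = (5 + 10) % 16)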
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
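(* Example (illustrative only): append_uint #4 #4 3 10 = 3 + 10 * 16 = 163,
i.e. 0xA3 -- num2 lands in the high bits and num1 in the low bits,
matching the vector layout proved by to_vec_append below. *)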
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
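(* Example (illustrative only): for hi = 1, lo = 2^63 and s = 1 the result is
(1 * 2) % pow2 64 + 2^63 / 2^63 = 2 + 1 = 3: the bit shifted out of lo
reappears as the low bit of the combined high word. *)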
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
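(* Concrete instance (illustrative only): n = 3, m = 5, k1 = 4, k2 = 2 gives
k1*m / (k1*k2) = 20 / 8 = 2 and (n + k1*m) / (k1*k2) = 23 / 8 = 2 --
adding n < k1 cannot change the quotient. *)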
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
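(* Example (illustrative only): shifting v a = 1 (low = 1, high = 0) by 64
lands in the shift_left_large branch and yields low = 0, high = 1,
i.e. pow2 64, as the postcondition (v a * pow2 64) % pow2 128 demands. *)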
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
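(* Example (illustrative only): for v a = pow2 64 (low = 0, high = 1) and
s = 64, shift_right_large returns low = 1, high = 0, i.e. v a / pow2 64 = 1. *)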
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
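(* The comparison operators above are lexicographic on the (high, low) pair:
the high limbs decide unless they are equal, in which case the low limbs do.
E.g. low = 0, high = 1 is gt low = 0xFFFFFFFFFFFFFFFF, high = 0, because the
high limbs already differ. *)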
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
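(* gte_mask composes two branchless tests: mask_hi_gte is all-ones exactly
when a.high > b.high (gte and not eq on the high limbs), and mask_lo_gte is
all-ones exactly when the high limbs tie and a.low >= b.low; their or covers
v a >= v b, matching the two characterization lemmas above. *)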
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
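(* Example (illustrative only): u32_combine hi lo with hi = 0x100000002 and
lo = 3 gives hi % pow2 32 * pow2 32 + lo = 2 * pow2 32 + 3 = 0x200000003. *)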
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
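(* In decimal terms mul32 is schoolbook multiplication of a two-digit number
x = x1*B + x0 (with base B = pow2 32) by the single digit y:
x*y = (x1*y)*B + x0*y, where the high half of x0*y is folded into x1*y so
that each partial product stays below pow2 64 (by u32_product_bound). *)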
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
let mul_wide_low (x y: U64.t) = (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y
let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32
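(* mul_wide_low/high come from the schoolbook expansion of a 64x64 product
(see product_expand below):
x * y = phh * pow2 64 + (plh + phl) * pow2 32 + pll
with pll further split as pll_h * pow2 32 + pll_l, so that every partial
sum fits in 64 bits before the final recombination. *)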
inline_for_extraction noextract
let mul_wide_impl_t' (x y: U64.t) : Pure (tuple4 U64.t U64.t U64.t U64.t)
(requires True)
(ensures (fun r -> let (u1, w3, x', t') = r in
U64.v u1 == U64.v x % pow2 32 /\
U64.v w3 == pll_l x y /\
U64.v x' == h32 (U64.v x) /\
U64.v t' == phl x y + pll_h x y)) =
let u1 = u64_mod_32 x in
let v1 = u64_mod_32 y in
u32_product_bound (U64.v u1) (U64.v v1);
let t = U64.mul u1 v1 in
assert (U64.v t == pll x y);
let w3 = u64_mod_32 t in
assert (U64.v w3 == pll_l x y);
let k = U64.shift_right t u32_32 in
assert (U64.v k == pll_h x y);
let x' = U64.shift_right x u32_32 in
assert (U64.v x' == h32 (U64.v x));
u32_product_bound (U64.v x') (U64.v v1);
let t' = U64.add (U64.mul x' v1) k in
(u1, w3, x', t')
// similar to u32_combine, but computes (hi * 2^32) % 2^64 rather than (hi % 2^32) * 2^32
let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
inline_for_extraction noextract
let mul_wide_impl (x: U64.t) (y: U64.t) :
Tot (r:t{U64.v r.low == mul_wide_low x y /\
U64.v r.high == mul_wide_high x y % pow2 64}) =
let (u1, w3, x', t') = mul_wide_impl_t' x y in
let k' = u64_mod_32 t' in
let w1 = U64.shift_right t' u32_32 in
assert (U64.v w1 == (phl x y + pll_h x y) / pow2 32);
let y' = U64.shift_right y u32_32 in
assert (U64.v y' == h32 (U64.v y));
u32_product_bound (U64.v u1) (U64.v y');
let t'' = U64.add (U64.mul u1 y') k' in
assert (U64.v t'' == plh x y + (phl x y + pll_h x y) % pow2 32);
let k'' = U64.shift_right t'' u32_32 in
assert (U64.v k'' == (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32);
u32_product_bound (U64.v x') (U64.v y');
mod_mul_pow2 (U64.v t'') 32 64;
let r0 = u32_combine' t'' w3 in
// let r0 = U64.add (U64.shift_left t'' u32_32) w3 in
assert (U64.v r0 == (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y);
let xy_w1 = U64.add (U64.mul x' y') w1 in
assert (U64.v xy_w1 == phh x y + (phl x y + pll_h x y) / pow2 32);
let r1 = U64.add_mod xy_w1 k'' in
assert (U64.v r1 == (phh x y + (phl x y + pll_h x y) / pow2 32 + (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32) % pow2 64);
let r = { low = r0; high = r1; } in
r
let product_sums (a b c d:nat) :
Lemma ((a + b) * (c + d) == a * c + a * d + b * c + b * d) = ()
val u64_32_product (xl xh yl yh:UInt.uint_t 32) :
Lemma ((xl + xh * pow2 32) * (yl + yh * pow2 32) ==
xl * yl + (xl * yh) * pow2 32 + (xh * yl) * pow2 32 + (xh * yh) * pow2 64)
#push-options "--z3rlimit 25"
let u64_32_product xl xh yl yh =
assert (xh >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (pow2 32 >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (xh*pow2 32 >= 0);
product_sums xl (xh*pow2 32) yl (yh*pow2 32);
mul_abc_to_acb xh (pow2 32) yl;
assert (xl * (yh * pow2 32) == (xl * yh) * pow2 32);
Math.pow2_plus 32 32;
assert ((xh * pow2 32) * (yh * pow2 32) == (xh * yh) * pow2 64)
#pop-options
let product_expand (x y: U64.t) :
Lemma (U64.v x * U64.v y == phh x y * pow2 64 +
(plh x y + phl x y + pll_h x y) * pow2 32 +
pll_l x y) =
assert (U64.v x == l32 (U64.v x) + h32 (U64.v x) * pow2 32);
assert (U64.v y == l32 (U64.v y) + h32 (U64.v y) * pow2 32);
u64_32_product (l32 (U64.v x)) (h32 (U64.v x)) (l32 (U64.v y)) (h32 (U64.v y))
let product_low_expand (x y: U64.t) :
Lemma ((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) =
product_expand x y;
Math.lemma_mod_plus ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) (phh x y) (pow2 64)
let add_mod_then_mod (n m:nat) (k:pos) :
Lemma ((n + m % k) % k == (n + m) % k) =
mod_add n m k;
mod_add n (m % k) k;
mod_double m k
let shift_add (n:nat) (m:nat{m < pow2 32}) :
Lemma (n * pow2 32 % pow2 64 + m == (n * pow2 32 + m) % pow2 64) =
add_mod_small' m (n*pow2 32) (pow2 64)
let mul_wide_low_ok (x y: U64.t) :
Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) =
Math.pow2_plus 32 32;
mod_mul (plh x y + (phl x y + pll_h x y) % pow2 32) (pow2 32) (pow2 32);
assert (mul_wide_low x y ==
(plh x y + (phl x y + pll_h x y) % pow2 32) % pow2 32 * pow2 32 + pll_l x y);
add_mod_then_mod (plh x y) (phl x y + pll_h x y) (pow2 32);
assert (mul_wide_low x y == (plh x y + phl x y + pll_h x y) % pow2 32 * pow2 32 + pll_l x y);
mod_mul (plh x y + phl x y + pll_h x y) (pow2 32) (pow2 32);
shift_add (plh x y + phl x y + pll_h x y) (pll_l x y);
assert (mul_wide_low x y == ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64);
product_low_expand x y
val product_high32 : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 32 == phh x y * pow2 32 + plh x y + phl x y + pll_h x y)
#push-options "--z3rlimit 20" | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val product_high32 : x:U64.t -> y:U64.t ->
Lemma ((U64.v x * U64.v y) / pow2 32 == phh x y * pow2 32 + plh x y + phl x y + pll_h x y) | [] | FStar.UInt128.product_high32 | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt64.t -> y: FStar.UInt64.t
-> FStar.Pervasives.Lemma
(ensures
FStar.UInt64.v x * FStar.UInt64.v y / Prims.pow2 32 ==
FStar.UInt128.phh x y * Prims.pow2 32 + FStar.UInt128.plh x y + FStar.UInt128.phl x y +
FStar.UInt128.pll_h x y) | {
"end_col": 51,
"end_line": 1117,
"start_col": 2,
"start_line": 1112
} |
Prims.Tot | val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128} | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r | val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
let shift_left_large a s = | false | null | false | let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0; high = U64.shift_left a.low h_shift } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"total"
] | [
"FStar.UInt128.t",
"FStar.UInt32.t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt32.v",
"Prims.op_LessThan",
"Prims.unit",
"FStar.UInt128.shift_left_large_lemma_t",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"FStar.Mul.op_Star",
"FStar.UInt64.v",
"FStar.UInt128.__proj__Mkuint128__item__high",
"Prims.pow2",
"Prims.op_Modulus",
"FStar.UInt128.__proj__Mkuint128__item__low",
"FStar.Math.Lemmas.pow2_plus",
"Prims.op_Subtraction",
"FStar.UInt128.mod_mul",
"FStar.UInt128.uint128",
"FStar.UInt128.Mkuint128",
"FStar.UInt64.uint_to_t",
"FStar.UInt64.shift_left",
"FStar.UInt32.sub",
"FStar.UInt128.u32_64",
"Prims.op_Equality",
"FStar.UInt128.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the times, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
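(* Worked derivation (editorial): n * pow2 k2 = (n * pow2 (k2 - k1)) * pow2 k1
   is a multiple of pow2 k1, hence 0 modulo pow2 k1. *)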
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
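(* Intuition (editorial): a shift by s >= 64 pushes every bit of a.high past
   bit 127, so modulo pow2 128 only a.low contributes; e.g. for s = 64 the
   result is just a.low moved into the high word. *)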
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
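(* Worked example (editorial, not in the original source): for s = 8,
   add_u64_shift_left hi lo 8ul yields the new high word of (hi,lo) << 8:
   U64.v hi * pow2 8 % pow2 64 (hi shifted left, its top byte dropped)
   plus U64.v lo / pow2 56 (the top byte of lo, landing in the low byte). *)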
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
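(* Worked equation (editorial): both sides keep only bits 0..63 of n and
   shift them up by 64, so (n % 2^64) * 2^64 = (n * 2^64) % 2^128. *)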
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128} | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 5,
"quake_keep": false,
"quake_lo": 1,
"retry": true,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128} | [] | FStar.UInt128.shift_left_large | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: FStar.UInt128.t -> s: FStar.UInt32.t{FStar.UInt32.v s >= 64 /\ FStar.UInt32.v s < 128}
-> r:
FStar.UInt128.t
{FStar.UInt128.v r = FStar.UInt128.v a * Prims.pow2 (FStar.UInt32.v s) % Prims.pow2 128} | {
"end_col": 3,
"end_line": 706,
"start_col": 26,
"start_line": 696
} |
FStar.Pervasives.Lemma | val mul_wide_low_ok (x y: U64.t) : Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul_wide_low_ok (x y: U64.t) :
Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) =
Math.pow2_plus 32 32;
mod_mul (plh x y + (phl x y + pll_h x y) % pow2 32) (pow2 32) (pow2 32);
assert (mul_wide_low x y ==
(plh x y + (phl x y + pll_h x y) % pow2 32) % pow2 32 * pow2 32 + pll_l x y);
add_mod_then_mod (plh x y) (phl x y + pll_h x y) (pow2 32);
assert (mul_wide_low x y == (plh x y + phl x y + pll_h x y) % pow2 32 * pow2 32 + pll_l x y);
mod_mul (plh x y + phl x y + pll_h x y) (pow2 32) (pow2 32);
shift_add (plh x y + phl x y + pll_h x y) (pll_l x y);
assert (mul_wide_low x y == ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64);
product_low_expand x y | val mul_wide_low_ok (x y: U64.t) : Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64)
let mul_wide_low_ok (x y: U64.t) : Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) = | false | null | true | Math.pow2_plus 32 32;
mod_mul (plh x y + (phl x y + pll_h x y) % pow2 32) (pow2 32) (pow2 32);
assert (mul_wide_low x y ==
((plh x y + (phl x y + pll_h x y) % pow2 32) % pow2 32) * pow2 32 + pll_l x y);
add_mod_then_mod (plh x y) (phl x y + pll_h x y) (pow2 32);
assert (mul_wide_low x y == ((plh x y + phl x y + pll_h x y) % pow2 32) * pow2 32 + pll_l x y);
mod_mul (plh x y + phl x y + pll_h x y) (pow2 32) (pow2 32);
shift_add (plh x y + phl x y + pll_h x y) (pll_l x y);
assert (mul_wide_low x y == ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64);
product_low_expand x y | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt64.t",
"FStar.UInt128.product_low_expand",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"FStar.UInt128.mul_wide_low",
"Prims.op_Modulus",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"FStar.UInt128.plh",
"FStar.UInt128.phl",
"FStar.UInt128.pll_h",
"Prims.pow2",
"FStar.UInt128.pll_l",
"FStar.UInt128.shift_add",
"FStar.UInt128.mod_mul",
"FStar.UInt128.add_mod_then_mod",
"FStar.Math.Lemmas.pow2_plus",
"Prims.l_True",
"Prims.squash",
"FStar.UInt64.v",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
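(* Editorial note: this is a branchless borrow computation; the shifted-out
   top bit is 1 exactly when a < b (proved in constant_time_carry_ok below),
   so the comparison involves no data-dependent branch. *)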
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets a special treatment in KaRaMeL and its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A weird helper used below... seems like the native encoding of
bitvectors may be making these proofs much harder than they should be? *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
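(* Example (editorial): with U64.v a = U64.v b = pow2 63 the modular sum
   U64.add_mod a b wraps to 0, so carry returns 1 (since 0 < pow2 63),
   matching (pow2 63 + pow2 63) / pow2 64 = 1. *)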
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
// This proof is pretty stable with the calc proof, but it can fail
// ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
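(* Example usage (editorial, not in the original source):
   shift_left (uint_to_t 1) 100ul takes the shift_left_large branch and has
   value pow2 100 % pow2 128 = pow2 100. *)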
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
()
val u128_div_pow2 (a: t) (s:nat{s < 64}) :
Lemma (v a / pow2 s == U64.v a.low / pow2 s + U64.v a.high * pow2 (64 - s))
let u128_div_pow2 a s =
Math.pow2_plus (64-s) s;
Math.paren_mul_right (U64.v a.high) (pow2 (64-s)) (pow2 s);
Math.division_addition_lemma (U64.v a.low) (pow2 s) (U64.v a.high * pow2 (64 - s))
let shift_right_small (a: t) (s: U32.t{U32.v s < 64}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.eq s 0ul then a
else
let r = { low = add_u64_shift_right_respec a.high a.low s;
high = U64.shift_right a.high s; } in
let a_h = U64.v a.high in
let a_l = U64.v a.low in
let s = U32.v s in
shift_right_reconstruct a_h s;
assert (v r == a_h * pow2 (64-s) + a_l / pow2 s);
u128_div_pow2 a s;
r
let shift_right_large (a: t) (s: U32.t{U32.v s >= 64 /\ U32.v s < 128}) : Pure t
(requires True)
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
let r = { high = U64.uint_to_t 0;
low = U64.shift_right a.high (U32.sub s u32_64); } in
let s = U32.v s in
Math.pow2_plus 64 (s - 64);
div_product (v a) (pow2 64) (pow2 (s - 64));
assert (v a / pow2 s == v a / pow2 64 / pow2 (s - 64));
div_plus_multiple (U64.v a.low) (U64.v a.high) (pow2 64);
r
let shift_right (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 128))
(ensures (fun r -> v r == v a / pow2 (U32.v s))) =
if U32.lt s u32_64
then shift_right_small a s
else shift_right_large a s
let eq (a b:t) = U64.eq a.low b.low && U64.eq a.high b.high
let gt (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gt a.low b.low)
let lt (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lt a.low b.low)
let gte (a b:t) = U64.gt a.high b.high ||
(U64.eq a.high b.high && U64.gte a.low b.low)
let lte (a b:t) = U64.lt a.high b.high ||
(U64.eq a.high b.high && U64.lte a.low b.low)
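(* Editorial note: all four comparisons are lexicographic on (high, low):
   the high words decide unless they are equal, in which case the low words
   decide. This agrees with v a = U64.v a.low + U64.v a.high * pow2 64. *)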
let u64_logand_comm (a b:U64.t) : Lemma (U64.logand a b == U64.logand b a) =
UInt.logand_commutative (U64.v a) (U64.v b)
val u64_and_0 (a b:U64.t) :
Lemma (U64.v b = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)]
let u64_and_0 a b = UInt.logand_lemma_1 (U64.v a)
let u64_0_and (a b:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.logand a b) = 0)
[SMTPat (U64.logand a b)] =
u64_logand_comm a b
val u64_1s_and (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 /\
U64.v b = pow2 64 - 1 ==> U64.v (U64.logand a b) = pow2 64 - 1)
[SMTPat (U64.logand a b)]
let u64_1s_and a b = UInt.logand_lemma_2 (U64.v a)
let eq_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a = v b ==> v r = pow2 128 - 1) /\ (v a <> v b ==> v r = 0))) =
let mask = U64.logand (U64.eq_mask a.low b.low)
(U64.eq_mask a.high b.high) in
{ low = mask; high = mask; }
private let gte_characterization (a b: t) :
Lemma (v a >= v b ==>
U64.v a.high > U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low >= U64.v b.low)) = ()
private let lt_characterization (a b: t) :
Lemma (v a < v b ==>
U64.v a.high < U64.v b.high \/
(U64.v a.high = U64.v b.high /\ U64.v a.low < U64.v b.low)) = ()
let u64_logor_comm (a b:U64.t) : Lemma (U64.logor a b == U64.logor b a) =
UInt.logor_commutative (U64.v a) (U64.v b)
val u64_or_1 (a b:U64.t) :
Lemma (U64.v b = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)]
let u64_or_1 a b = UInt.logor_lemma_2 (U64.v a)
let u64_1_or (a b:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.logor a b) = pow2 64 - 1)
[SMTPat (U64.logor a b)] =
u64_logor_comm a b
val u64_or_0 (a b:U64.t) :
Lemma (U64.v a = 0 /\ U64.v b = 0 ==> U64.v (U64.logor a b) = 0)
[SMTPat (U64.logor a b)]
let u64_or_0 a b = UInt.logor_lemma_1 (U64.v a)
val u64_not_0 (a:U64.t) :
Lemma (U64.v a = 0 ==> U64.v (U64.lognot a) = pow2 64 - 1)
[SMTPat (U64.lognot a)]
let u64_not_0 a = UInt.lognot_lemma_1 #64
val u64_not_1 (a:U64.t) :
Lemma (U64.v a = pow2 64 - 1 ==> U64.v (U64.lognot a) = 0)
[SMTPat (U64.lognot a)]
let u64_not_1 a =
UInt.nth_lemma (UInt.lognot #64 (UInt.ones 64)) (UInt.zero 64)
let gte_mask (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a >= v b ==> v r = pow2 128 - 1) /\ (v a < v b ==> v r = 0))) =
let mask_hi_gte = U64.logand (U64.gte_mask a.high b.high)
(U64.lognot (U64.eq_mask a.high b.high)) in
let mask_lo_gte = U64.logand (U64.eq_mask a.high b.high)
(U64.gte_mask a.low b.low) in
let mask = U64.logor mask_hi_gte mask_lo_gte in
gte_characterization a b;
lt_characterization a b;
{ low = mask; high = mask; }
let uint64_to_uint128 (a:U64.t) = { low = a; high = U64.uint_to_t 0; }
let uint128_to_uint64 (a:t) : b:U64.t{U64.v b == v a % pow2 64} = a.low
inline_for_extraction
let u64_l32_mask: x:U64.t{U64.v x == pow2 32 - 1} = U64.uint_to_t 0xffffffff
let u64_mod_32 (a: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r = U64.v a % pow2 32)) =
UInt.logand_mask (U64.v a) 32;
U64.logand a u64_l32_mask
let u64_32_digits (a: U64.t) : Lemma (U64.v a / pow2 32 * pow2 32 + U64.v a % pow2 32 == U64.v a) =
div_mod (U64.v a) (pow2 32)
val mul32_digits : x:UInt.uint_t 64 -> y:UInt.uint_t 32 ->
Lemma (x * y == (x / pow2 32 * y) * pow2 32 + (x % pow2 32) * y)
let mul32_digits x y = ()
let u32_32 : x:U32.t{U32.v x == 32} = U32.uint_to_t 32
#push-options "--z3rlimit 30"
let u32_combine (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi % pow2 32 * pow2 32 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
#pop-options
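(* Worked equation (editorial): u32_combine hi lo = lo + (hi << 32): the low
   32 bits of hi fill the high half of the result and lo (< 2^32) fills the
   low half, giving U64.v hi % pow2 32 * pow2 32 + U64.v lo. *)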
let product_bound (a b:nat) (k:pos) :
Lemma (requires (a < k /\ b < k))
(ensures a * b <= k * k - 2*k + 1) =
Math.lemma_mult_le_right b a (k-1);
Math.lemma_mult_le_left (k-1) b (k-1)
val uint_product_bound : #n:nat -> a:UInt.uint_t n -> b:UInt.uint_t n ->
Lemma (a * b <= pow2 (2*n) - 2*(pow2 n) + 1)
let uint_product_bound #n a b =
product_bound a b (pow2 n);
Math.pow2_plus n n
val u32_product_bound : a:nat{a < pow2 32} -> b:nat{b < pow2 32} ->
Lemma (UInt.size (a * b) 64 /\ a * b < pow2 64 - pow2 32 - 1)
let u32_product_bound a b =
uint_product_bound #32 a b
let mul32 x y =
let x0 = u64_mod_32 x in
let x1 = U64.shift_right x u32_32 in
u32_product_bound (U64.v x0) (U32.v y);
let x0y = U64.mul x0 (FStar.Int.Cast.uint32_to_uint64 y) in
let x0yl = u64_mod_32 x0y in
let x0yh = U64.shift_right x0y u32_32 in
u32_product_bound (U64.v x1) (U32.v y);
// not in the original C code
let x1y' = U64.mul x1 (FStar.Int.Cast.uint32_to_uint64 y) in
let x1y = U64.add x1y' x0yh in
// correspondence with C:
// r0 = r.low
// r0 is written using u32_combine hi lo = lo + hi << 32
// r1 = r.high
let r = { low = u32_combine x1y x0yl;
high = U64.shift_right x1y u32_32; } in
u64_32_digits x;
//assert (U64.v x == U64.v x1 * pow2 32 + U64.v x0);
assert (U64.v x0y == U64.v x0 * U32.v y);
u64_32_digits x0y;
//assert (U64.v x0y == U64.v x0yh * pow2 32 + U64.v x0yl);
assert (U64.v x1y' == U64.v x / pow2 32 * U32.v y);
mul32_digits (U64.v x) (U32.v y);
assert (U64.v x * U32.v y == U64.v x1y' * pow2 32 + U64.v x0y);
r
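(* Worked example (editorial, hand-checked): for U64.v x = 0x100000001 and
   U32.v y = 0xFFFFFFFF we get x0 = 1, x1 = 1, x0y = 0xFFFFFFFF (so x0yl =
   0xFFFFFFFF, x0yh = 0) and x1y = 0xFFFFFFFF, hence r.low = 0xFFFFFFFFFFFFFFFF
   and r.high = 0, i.e. exactly x * y = 2^64 - 1. *)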
let l32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x % pow2 32
let h32 (x: UInt.uint_t 64) : UInt.uint_t 32 = x / pow2 32
val mul32_bound : x:UInt.uint_t 32 -> y:UInt.uint_t 32 ->
n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1 /\ n == x * y}
let mul32_bound x y =
u32_product_bound x y;
x * y
let pll (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (l32 (U64.v y))
let plh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (l32 (U64.v x)) (h32 (U64.v y))
let phl (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (l32 (U64.v y))
let phh (x y: U64.t) : n:UInt.uint_t 64{n < pow2 64 - pow2 32 - 1} =
mul32_bound (h32 (U64.v x)) (h32 (U64.v y))
let pll_l (x y: U64.t) : UInt.uint_t 32 =
l32 (pll x y)
let pll_h (x y: U64.t) : UInt.uint_t 32 =
h32 (pll x y)
let mul_wide_low (x y: U64.t) = (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y
let mul_wide_high (x y: U64.t) =
phh x y +
(phl x y + pll_h x y) / pow2 32 +
(plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32
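(* Schoolbook decomposition (editorial), proved as product_expand below:
   x * y = phh x y * 2^64 + (phl x y + plh x y) * 2^32 + pll x y, where
   pll x y = pll_h x y * 2^32 + pll_l x y. mul_wide_low/mul_wide_high regroup
   these partial products so that, because each product is bounded by
   2^64 - 2^32 - 1, every intermediate sum still fits in 64 bits. *)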
inline_for_extraction noextract
let mul_wide_impl_t' (x y: U64.t) : Pure (tuple4 U64.t U64.t U64.t U64.t)
(requires True)
(ensures (fun r -> let (u1, w3, x', t') = r in
U64.v u1 == U64.v x % pow2 32 /\
U64.v w3 == pll_l x y /\
U64.v x' == h32 (U64.v x) /\
U64.v t' == phl x y + pll_h x y)) =
let u1 = u64_mod_32 x in
let v1 = u64_mod_32 y in
u32_product_bound (U64.v u1) (U64.v v1);
let t = U64.mul u1 v1 in
assert (U64.v t == pll x y);
let w3 = u64_mod_32 t in
assert (U64.v w3 == pll_l x y);
let k = U64.shift_right t u32_32 in
assert (U64.v k == pll_h x y);
let x' = U64.shift_right x u32_32 in
assert (U64.v x' == h32 (U64.v x));
u32_product_bound (U64.v x') (U64.v v1);
let t' = U64.add (U64.mul x' v1) k in
(u1, w3, x', t')
// similar to u32_combine, but computes hi * 2^32 % 2^64 (reduce after the shift) instead of hi % 2^32 * 2^32
let u32_combine' (hi lo: U64.t) : Pure U64.t
(requires (U64.v lo < pow2 32))
(ensures (fun r -> U64.v r = U64.v hi * pow2 32 % pow2 64 + U64.v lo)) =
U64.add lo (U64.shift_left hi u32_32)
inline_for_extraction noextract
let mul_wide_impl (x: U64.t) (y: U64.t) :
Tot (r:t{U64.v r.low == mul_wide_low x y /\
U64.v r.high == mul_wide_high x y % pow2 64}) =
let (u1, w3, x', t') = mul_wide_impl_t' x y in
let k' = u64_mod_32 t' in
let w1 = U64.shift_right t' u32_32 in
assert (U64.v w1 == (phl x y + pll_h x y) / pow2 32);
let y' = U64.shift_right y u32_32 in
assert (U64.v y' == h32 (U64.v y));
u32_product_bound (U64.v u1) (U64.v y');
let t'' = U64.add (U64.mul u1 y') k' in
assert (U64.v t'' == plh x y + (phl x y + pll_h x y) % pow2 32);
let k'' = U64.shift_right t'' u32_32 in
assert (U64.v k'' == (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32);
u32_product_bound (U64.v x') (U64.v y');
mod_mul_pow2 (U64.v t'') 32 64;
let r0 = u32_combine' t'' w3 in
// let r0 = U64.add (U64.shift_left t'' u32_32) w3 in
assert (U64.v r0 == (plh x y + (phl x y + pll_h x y) % pow2 32) * pow2 32 % pow2 64 + pll_l x y);
let xy_w1 = U64.add (U64.mul x' y') w1 in
assert (U64.v xy_w1 == phh x y + (phl x y + pll_h x y) / pow2 32);
let r1 = U64.add_mod xy_w1 k'' in
assert (U64.v r1 == (phh x y + (phl x y + pll_h x y) / pow2 32 + (plh x y + (phl x y + pll_h x y) % pow2 32) / pow2 32) % pow2 64);
let r = { low = r0; high = r1; } in
r
let product_sums (a b c d:nat) :
Lemma ((a + b) * (c + d) == a * c + a * d + b * c + b * d) = ()
val u64_32_product (xl xh yl yh:UInt.uint_t 32) :
Lemma ((xl + xh * pow2 32) * (yl + yh * pow2 32) ==
xl * yl + (xl * yh) * pow2 32 + (xh * yl) * pow2 32 + (xh * yh) * pow2 64)
#push-options "--z3rlimit 25"
let u64_32_product xl xh yl yh =
assert (xh >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (pow2 32 >= 0); //flakiness; without this, can't prove that (xh * pow2 32) >= 0
assert (xh*pow2 32 >= 0);
product_sums xl (xh*pow2 32) yl (yh*pow2 32);
mul_abc_to_acb xh (pow2 32) yl;
assert (xl * (yh * pow2 32) == (xl * yh) * pow2 32);
Math.pow2_plus 32 32;
assert ((xh * pow2 32) * (yh * pow2 32) == (xh * yh) * pow2 64)
#pop-options
let product_expand (x y: U64.t) :
Lemma (U64.v x * U64.v y == phh x y * pow2 64 +
(plh x y + phl x y + pll_h x y) * pow2 32 +
pll_l x y) =
assert (U64.v x == l32 (U64.v x) + h32 (U64.v x) * pow2 32);
assert (U64.v y == l32 (U64.v y) + h32 (U64.v y) * pow2 32);
u64_32_product (l32 (U64.v x)) (h32 (U64.v x)) (l32 (U64.v y)) (h32 (U64.v y))
let product_low_expand (x y: U64.t) :
Lemma ((U64.v x * U64.v y) % pow2 64 ==
((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) % pow2 64) =
product_expand x y;
Math.lemma_mod_plus ((plh x y + phl x y + pll_h x y) * pow2 32 + pll_l x y) (phh x y) (pow2 64)
let add_mod_then_mod (n m:nat) (k:pos) :
Lemma ((n + m % k) % k == (n + m) % k) =
mod_add n m k;
mod_add n (m % k) k;
mod_double m k
let shift_add (n:nat) (m:nat{m < pow2 32}) :
Lemma (n * pow2 32 % pow2 64 + m == (n * pow2 32 + m) % pow2 64) =
add_mod_small' m (n*pow2 32) (pow2 64)
let mul_wide_low_ok (x y: U64.t) : | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul_wide_low_ok (x y: U64.t) : Lemma (mul_wide_low x y == (U64.v x * U64.v y) % pow2 64) | [] | FStar.UInt128.mul_wide_low_ok | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.UInt64.t -> y: FStar.UInt64.t
-> FStar.Pervasives.Lemma
(ensures FStar.UInt128.mul_wide_low x y == FStar.UInt64.v x * FStar.UInt64.v y % Prims.pow2 64) | {
"end_col": 24,
"end_line": 1106,
"start_col": 2,
"start_line": 1097
} |
FStar.Pervasives.Lemma | val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64) | [
{
"abbrev": true,
"full_module": "FStar.Tactics.BV",
"short_module": "TBV"
},
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Tactics.V2",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.BV",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Math.Lemmas",
"short_module": "Math"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.BitVector",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": true,
"full_module": "FStar.UInt",
"short_module": "UInt"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.UInt",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_right_reconstruct a_h s =
mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64-s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert (a_h / pow2 s * pow2 64 == a_h * pow2 64 / pow2 s / pow2 64 * pow2 64);
() | val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64)
let shift_right_reconstruct a_h s = | false | null | true | mul_pow2_diff a_h 64 s;
mod_spec_rew_n (a_h * pow2 (64 - s)) (pow2 64);
div_product_comm (a_h * pow2 64) (pow2 s) (pow2 64);
mul_div_cancel a_h (pow2 64);
assert ((a_h / pow2 s) * pow2 64 == (a_h * pow2 64 / pow2 s / pow2 64) * pow2 64);
() | {
"checked_file": "FStar.UInt128.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Tactics.V2.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.BV.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Calc.fsti.checked",
"FStar.BV.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "FStar.UInt128.fst"
} | [
"lemma"
] | [
"FStar.UInt.uint_t",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"FStar.Mul.op_Star",
"Prims.op_Division",
"Prims.pow2",
"FStar.UInt128.mul_div_cancel",
"FStar.UInt128.div_product_comm",
"FStar.UInt128.mod_spec_rew_n",
"Prims.op_Subtraction",
"FStar.UInt128.mul_pow2_diff"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.UInt128
open FStar.Mul
module UInt = FStar.UInt
module Seq = FStar.Seq
module BV = FStar.BitVector
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module Math = FStar.Math.Lemmas
open FStar.BV
open FStar.Tactics.V2
module T = FStar.Tactics.V2
module TBV = FStar.Tactics.BV
#set-options "--max_fuel 0 --max_ifuel 0 --split_queries no"
#set-options "--using_facts_from '*,-FStar.Tactics,-FStar.Reflection'"
(* TODO: explain why exactly this is needed? It leads to failures in
HACL* otherwise, claiming that some functions are not Low*. *)
#set-options "--normalize_pure_terms_for_extraction"
[@@ noextract_to "krml"]
noextract
let carry_uint64 (a b: uint_t 64) : Tot (uint_t 64) =
let ( ^^ ) = UInt.logxor in
let ( |^ ) = UInt.logor in
let ( -%^ ) = UInt.sub_mod in
let ( >>^ ) = UInt.shift_right in
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63
[@@ noextract_to "krml"]
noextract
let carry_bv (a b: uint_t 64) =
bvshr (bvxor (int2bv a)
(bvor (bvxor (int2bv a) (int2bv b)) (bvxor (bvsub (int2bv a) (int2bv b)) (int2bv b))))
63
let carry_uint64_ok (a b:uint_t 64)
: squash (int2bv (carry_uint64 a b) == carry_bv a b)
= _ by (T.norm [delta_only [`%carry_uint64]; unascribe];
let open FStar.Tactics.BV in
mapply (`trans);
arith_to_bv_tac ();
arith_to_bv_tac ();
T.norm [delta_only [`%carry_bv]];
trefl())
let fact1 (a b: uint_t 64) = carry_bv a b == int2bv 1
let fact0 (a b: uint_t 64) = carry_bv a b == int2bv 0
let lem_ult_1 (a b: uint_t 64)
: squash (bvult (int2bv a) (int2bv b) ==> fact1 a b)
= assert (bvult (int2bv a) (int2bv b) ==> fact1 a b)
by (T.norm [delta_only [`%fact1;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'";
smt())
let lem_ult_2 (a b:uint_t 64)
: squash (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
= assert (not (bvult (int2bv a) (int2bv b)) ==> fact0 a b)
by (T.norm [delta_only [`%fact0;`%carry_bv]];
set_options "--smtencoding.elim_box true --using_facts_from '__Nothing__' --z3smtopt '(set-option :smt.case_split 1)'")
let int2bv_ult (#n: pos) (a b: uint_t n)
: Lemma (ensures a < b <==> bvult #n (int2bv #n a) (int2bv #n b))
= introduce (a < b) ==> (bvult #n (int2bv #n a) (int2bv #n b))
with _ . FStar.BV.int2bv_lemma_ult_1 a b;
introduce (bvult #n (int2bv #n a) (int2bv #n b)) ==> (a < b)
with _ . FStar.BV.int2bv_lemma_ult_2 a b
let lem_ult (a b:uint_t 64)
: Lemma (if a < b
then fact1 a b
else fact0 a b)
= int2bv_ult a b;
lem_ult_1 a b;
lem_ult_2 a b
let constant_time_carry (a b: U64.t) : Tot U64.t =
let open U64 in
// CONSTANT_TIME_CARRY macro
// ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
// 63 = sizeof(a) * 8 - 1
a ^^ ((a ^^ b) |^ ((a -%^ b) ^^ b)) >>^ 63ul
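(* A worked instance of the branch-free trick, using small illustrative
   values: for a = 1, b = 2, the subtraction a -%^ b wraps to 2^64 - 1,
   whose top bit is preserved through the remaining xor/or steps, so the
   shift by 63 yields 1, matching a < b. Conversely, for a = 2, b = 1 the
   subtraction does not wrap, no term sets the top bit, and the result is 0. *)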
let carry_uint64_equiv (a b:UInt64.t)
: Lemma (U64.v (constant_time_carry a b) == carry_uint64 (U64.v a) (U64.v b))
= ()
// This type gets special treatment in KaRaMeL: its definition is never
// printed in the resulting C file.
type uint128: Type0 = { low: U64.t; high: U64.t }
let t = uint128
let _ = intro_ambient n
let _ = intro_ambient t
[@@ noextract_to "krml"]
let v x = U64.v x.low + (U64.v x.high) * (pow2 64)
let div_mod (x:nat) (k:nat{k > 0}) : Lemma (x / k * k + x % k == x) = ()
let uint_to_t x =
div_mod x (pow2 64);
{ low = U64.uint_to_t (x % (pow2 64));
high = U64.uint_to_t (x / (pow2 64)); }
let v_inj (x1 x2: t): Lemma (requires (v x1 == v x2))
(ensures x1 == x2) =
assert (uint_to_t (v x1) == uint_to_t (v x2));
assert (uint_to_t (v x1) == x1);
assert (uint_to_t (v x2) == x2);
()
(* A helper used below; the native encoding of bitvectors seems to make
these proofs much harder than they should be. *)
let bv2int_fun (#n:pos) (a b : bv_t n)
: Lemma (requires a == b)
(ensures bv2int a == bv2int b)
= ()
(* This proof is quite brittle. It has a bunch of annotations to get
decent verification performance. *)
let constant_time_carry_ok (a b:U64.t)
: Lemma (constant_time_carry a b ==
(if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0))
= calc (==) {
U64.v (constant_time_carry a b);
(==) { carry_uint64_equiv a b }
carry_uint64 (U64.v a) (U64.v b);
(==) { inverse_num_lemma (carry_uint64 (U64.v a) (U64.v b)) }
bv2int (int2bv (carry_uint64 (U64.v a) (U64.v b)));
(==) { carry_uint64_ok (U64.v a) (U64.v b);
bv2int_fun (int2bv (carry_uint64 (U64.v a) (U64.v b))) (carry_bv (U64.v a) (U64.v b));
()
}
bv2int (carry_bv (U64.v a) (U64.v b));
(==) {
lem_ult (U64.v a) (U64.v b);
bv2int_fun (carry_bv (U64.v a) (U64.v b)) (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
}
bv2int
(if U64.v a < U64.v b
then int2bv 1
else int2bv 0);
};
assert (
bv2int (if U64.v a < U64.v b then int2bv 1 else int2bv 0)
== U64.v (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)) by (T.norm []);
U64.v_inj (constant_time_carry a b) (if U64.lt a b then U64.uint_to_t 1 else U64.uint_to_t 0)
let carry (a b: U64.t) : Pure U64.t
(requires True)
(ensures (fun r -> U64.v r == (if U64.v a < U64.v b then 1 else 0))) =
constant_time_carry_ok a b;
constant_time_carry a b
let carry_sum_ok (a b:U64.t) :
Lemma (U64.v (carry (U64.add_mod a b) b) == (U64.v a + U64.v b) / (pow2 64)) = ()
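(* Intuition for carry_sum_ok: U64.add_mod a b wraps exactly when
   U64.v a + U64.v b >= pow2 64, and the wrapped sum a + b - pow2 64 is
   then strictly below U64.v b (since U64.v a < pow2 64), which is
   precisely the condition that carry detects. *)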
let add (a b: t) : Pure t
(requires (v a + v b < pow2 128))
(ensures (fun r -> v a + v b = v r)) =
let l = U64.add_mod a.low b.low in
carry_sum_ok a.low b.low;
{ low = l;
high = U64.add (U64.add a.high b.high) (carry l b.low); }
let add_underspec (a b: t) =
let l = U64.add_mod a.low b.low in
begin
if v a + v b < pow2 128
then carry_sum_ok a.low b.low
else ()
end;
{ low = l;
high = U64.add_underspec (U64.add_underspec a.high b.high) (carry l b.low); }
val mod_mod: a:nat -> k:nat{k>0} -> k':nat{k'>0} ->
Lemma ((a % k) % (k'*k) == a % k)
let mod_mod a k k' =
assert (a % k < k);
assert (a % k < k' * k)
let mod_spec (a:nat) (k:nat{k > 0}) :
Lemma (a % k == a - a / k * k) = ()
val div_product : n:nat -> m1:nat{m1>0} -> m2:nat{m2>0} ->
Lemma (n / (m1*m2) == (n / m1) / m2)
let div_product n m1 m2 =
Math.division_multiplication_lemma n m1 m2
val mul_div_cancel : n:nat -> k:nat{k>0} ->
Lemma ((n * k) / k == n)
let mul_div_cancel n k =
Math.cancel_mul_div n k
val mod_mul: n:nat -> k1:pos -> k2:pos ->
Lemma ((n % k2) * k1 == (n * k1) % (k1*k2))
let mod_mul n k1 k2 =
Math.modulo_scale_lemma n k1 k2
let mod_spec_rew_n (n:nat) (k:nat{k > 0}) :
Lemma (n == n / k * k + n % k) = mod_spec n k
val mod_add: n1:nat -> n2:nat -> k:nat{k > 0} ->
Lemma ((n1 % k + n2 % k) % k == (n1 + n2) % k)
let mod_add n1 n2 k = Math.modulo_distributivity n1 n2 k
val mod_add_small: n1:nat -> n2:nat -> k:nat{k > 0} -> Lemma
(requires (n1 % k + n2 % k < k))
(ensures (n1 % k + n2 % k == (n1 + n2) % k))
let mod_add_small n1 n2 k =
mod_add n1 n2 k;
Math.small_modulo_lemma_1 (n1%k + n2%k) k
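(* A quick sanity check on arbitrary small numbers: 3 % 10 + 4 % 10 = 7
   stays below k = 10, and indeed equals (3 + 4) % 10. *)
let _ = assert_norm (3 % 10 + 4 % 10 == (3 + 4) % 10)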
// This proof is fairly stable thanks to the calc structure, but it can
// still fail ~1% of the time, so add a retry.
#push-options "--split_queries no --z3rlimit 20 --retry 5"
let add_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> (v a + v b) % pow2 128 = v r)) =
let l = U64.add_mod a.low b.low in
let r = { low = l;
high = U64.add_mod (U64.add_mod a.high b.high) (carry l b.low)} in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
let b_l = U64.v b.low in
let b_h = U64.v b.high in
carry_sum_ok a.low b.low;
Math.lemma_mod_plus_distr_l (a_h + b_h) ((a_l + b_l) / (pow2 64)) (pow2 64);
calc (==) {
U64.v r.high * pow2 64;
== {}
((a_h + b_h + (a_l + b_l) / (pow2 64)) % pow2 64) * pow2 64;
== { mod_mul (a_h + b_h + (a_l + b_l) / (pow2 64)) (pow2 64) (pow2 64) }
((a_h + b_h + (a_l + b_l) / (pow2 64)) * pow2 64) % (pow2 64 * pow2 64);
== {}
((a_h + b_h + (a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
== {}
(a_h * pow2 64 + b_h * pow2 64 + ((a_l + b_l)/(pow2 64)) * pow2 64)
% pow2 128;
};
assert (U64.v r.low == (U64.v r.low) % pow2 128);
mod_add_small (a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64))
((a_l + b_l) % (pow2 64))
(pow2 128);
assert (U64.v r.low + U64.v r.high * pow2 64 ==
(a_h * pow2 64 +
b_h * pow2 64 +
(a_l + b_l) / (pow2 64) * (pow2 64) + (a_l + b_l) % (pow2 64)) % pow2 128);
mod_spec_rew_n (a_l + b_l) (pow2 64);
assert (v r ==
(a_h * pow2 64 +
b_h * pow2 64 +
a_l + b_l) % pow2 128);
assert_spinoff ((v a + v b) % pow2 128 = v r);
r
#pop-options
#push-options "--retry 5"
let sub (a b: t) : Pure t
(requires (v a - v b >= 0))
(ensures (fun r -> v r = v a - v b)) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub (U64.sub a.high b.high) (carry a.low l); }
#pop-options
let sub_underspec (a b: t) =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_underspec (U64.sub_underspec a.high b.high) (carry a.low l); }
let sub_mod_impl (a b: t) : t =
let l = U64.sub_mod a.low b.low in
{ low = l;
high = U64.sub_mod (U64.sub_mod a.high b.high) (carry a.low l); }
#push-options "--retry 10" // flaky
let sub_mod_pos_ok (a b:t) : Lemma
(requires (v a - v b >= 0))
(ensures (v (sub_mod_impl a b) = v a - v b)) =
assert (sub a b == sub_mod_impl a b);
()
#pop-options
val u64_diff_wrap : a:U64.t -> b:U64.t ->
Lemma (requires (U64.v a < U64.v b))
(ensures (U64.v (U64.sub_mod a b) == U64.v a - U64.v b + pow2 64))
let u64_diff_wrap a b = ()
#push-options "--z3rlimit 20"
val sub_mod_wrap1_ok : a:t -> b:t -> Lemma
(requires (v a - v b < 0 /\ U64.v a.low < U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128))
#push-options "--retry 10"
let sub_mod_wrap1_ok a b =
// carry == 1 and subtraction in low wraps
let l = U64.sub_mod a.low b.low in
assert (U64.v (carry a.low l) == 1);
u64_diff_wrap a.low b.low;
// a.high <= b.high since v a < v b;
// case split on equality and strictly less
if U64.v a.high = U64.v b.high then ()
else begin
u64_diff_wrap a.high b.high;
()
end
#pop-options
let sum_lt (a1 a2 b1 b2:nat) : Lemma
(requires (a1 + a2 < b1 + b2 /\ a1 >= b1))
(ensures (a2 < b2)) = ()
let sub_mod_wrap2_ok (a b:t) : Lemma
(requires (v a - v b < 0 /\ U64.v a.low >= U64.v b.low))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
// carry == 0, subtraction in low is exact, but subtraction in high
// must wrap since v a < v b
let l = U64.sub_mod a.low b.low in
let r = sub_mod_impl a b in
assert (U64.v l == U64.v a.low - U64.v b.low);
assert (U64.v (carry a.low l) == 0);
sum_lt (U64.v a.low) (U64.v a.high * pow2 64) (U64.v b.low) (U64.v b.high * pow2 64);
assert (U64.v (U64.sub_mod a.high b.high) == U64.v a.high - U64.v b.high + pow2 64);
()
let sub_mod_wrap_ok (a b:t) : Lemma
(requires (v a - v b < 0))
(ensures (v (sub_mod_impl a b) = v a - v b + pow2 128)) =
if U64.v a.low < U64.v b.low
then sub_mod_wrap1_ok a b
else sub_mod_wrap2_ok a b
#push-options "--z3rlimit 40"
let sub_mod (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = (v a - v b) % pow2 128)) =
(if v a - v b >= 0
then sub_mod_pos_ok a b
else sub_mod_wrap_ok a b);
sub_mod_impl a b
#pop-options
val shift_bound : #n:nat -> num:UInt.uint_t n -> n':nat ->
Lemma (num * pow2 n' <= pow2 (n'+n) - pow2 n')
let shift_bound #n num n' =
Math.lemma_mult_le_right (pow2 n') num (pow2 n - 1);
Math.distributivity_sub_left (pow2 n) 1 (pow2 n');
Math.pow2_plus n' n
val append_uint : #n1:nat -> #n2:nat -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 -> UInt.uint_t (n1+n2)
let append_uint #n1 #n2 num1 num2 =
shift_bound num2 n1;
num1 + num2 * pow2 n1
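(* For intuition, with illustrative widths n1 = n2 = 4: appending num1 = 10
   (0xA) and num2 = 12 (0xC) gives 10 + 12 * pow2 4 = 202 (0xCA), i.e. num1
   lands in the low bits and num2 in the high bits of the combined value. *)
let _ = assert_norm (10 + 12 * pow2 4 == 202)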
val to_vec_append : #n1:nat{n1 > 0} -> #n2:nat{n2 > 0} -> num1:UInt.uint_t n1 -> num2:UInt.uint_t n2 ->
Lemma (UInt.to_vec (append_uint num1 num2) == Seq.append (UInt.to_vec num2) (UInt.to_vec num1))
let to_vec_append #n1 #n2 num1 num2 =
UInt.append_lemma (UInt.to_vec num2) (UInt.to_vec num1)
let vec128 (a: t) : BV.bv_t 128 = UInt.to_vec #128 (v a)
let vec64 (a: U64.t) : BV.bv_t 64 = UInt.to_vec (U64.v a)
let to_vec_v (a: t) :
Lemma (vec128 a == Seq.append (vec64 a.high) (vec64 a.low)) =
to_vec_append (U64.v a.low) (U64.v a.high)
val logand_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2) ==
BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logand_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logand_vec a1 b1) (BV.logand_vec a2 b2))
(BV.logand_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
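(* The bitwise operations below (logand, logxor, logor, lognot) all follow
   the same recipe: compute on the two 64-bit halves, then use to_vec_v
   together with the corresponding *_vec_append lemma to show that the
   halves concatenate to the 128-bit bit-vector specification. *)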
let logand (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logand #128 (v a) (v b))) =
let r = { low = U64.logand a.low b.low;
high = U64.logand a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logand_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logand_vec (vec128 a) (vec128 b));
r
val logxor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2) ==
BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logxor_vec a1 b1) (BV.logxor_vec a2 b2))
(BV.logxor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logxor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logxor #128 (v a) (v b))) =
let r = { low = U64.logxor a.low b.low;
high = U64.logxor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logxor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logxor_vec (vec128 a) (vec128 b));
r
val logor_vec_append (#n1 #n2: pos) (a1 b1: BV.bv_t n1) (a2 b2: BV.bv_t n2) :
Lemma (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2) ==
BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor_vec_append #n1 #n2 a1 b1 a2 b2 =
Seq.lemma_eq_intro (Seq.append (BV.logor_vec a1 b1) (BV.logor_vec a2 b2))
(BV.logor_vec #(n1 + n2) (Seq.append a1 a2) (Seq.append b1 b2))
let logor (a b: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.logor #128 (v a) (v b))) =
let r = { low = U64.logor a.low b.low;
high = U64.logor a.high b.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
logor_vec_append (vec64 a.high) (vec64 b.high)
(vec64 a.low) (vec64 b.low);
to_vec_v a;
to_vec_v b;
assert (vec128 r == BV.logor_vec (vec128 a) (vec128 b));
r
val lognot_vec_append (#n1 #n2: pos) (a1: BV.bv_t n1) (a2: BV.bv_t n2) :
Lemma (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2) ==
BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot_vec_append #n1 #n2 a1 a2 =
Seq.lemma_eq_intro (Seq.append (BV.lognot_vec a1) (BV.lognot_vec a2))
(BV.lognot_vec #(n1 + n2) (Seq.append a1 a2))
let lognot (a: t) : Pure t
(requires True)
(ensures (fun r -> v r = UInt.lognot #128 (v a))) =
let r = { low = U64.lognot a.low;
high = U64.lognot a.high } in
to_vec_v r;
assert (vec128 r == Seq.append (vec64 r.high) (vec64 r.low));
lognot_vec_append (vec64 a.high) (vec64 a.low);
to_vec_v a;
assert (vec128 r == BV.lognot_vec (vec128 a));
r
let mod_mul_cancel (n:nat) (k:nat{k > 0}) :
Lemma ((n * k) % k == 0) =
mod_spec (n * k) k;
mul_div_cancel n k;
()
let shift_past_mod (n:nat) (k1:nat) (k2:nat{k2 >= k1}) :
Lemma ((n * pow2 k2) % pow2 k1 == 0) =
assert (k2 == (k2 - k1) + k1);
Math.pow2_plus (k2 - k1) k1;
Math.paren_mul_right n (pow2 (k2 - k1)) (pow2 k1);
mod_mul_cancel (n * pow2 (k2 - k1)) (pow2 k1)
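(* Concrete instance with illustrative numbers: a multiple of pow2 6 is in
   particular a multiple of pow2 4, so (5 * pow2 6) % pow2 4 = 320 % 16 = 0. *)
let _ = assert_norm ((5 * pow2 6) % pow2 4 == 0)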
val mod_double: a:nat -> k:nat{k>0} ->
Lemma (a % k % k == a % k)
let mod_double a k =
mod_mod a k 1
let shift_left_large_val (#n1:nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s:nat) :
Lemma ((a1 + a2 * pow2 n1) * pow2 s == (a1 * pow2 s + a2 * pow2 (n1+s))) =
Math.distributivity_add_left a1 (a2 * pow2 n1) (pow2 s);
Math.paren_mul_right a2 (pow2 n1) (pow2 s);
Math.pow2_plus n1 s
#push-options "--z3rlimit 40"
let shift_left_large_lemma (#n1: nat) (#n2: nat) (a1:UInt.uint_t n1) (a2:UInt.uint_t n2) (s: nat{s >= n2}) :
Lemma (((a1 + a2 * pow2 n1) * pow2 s) % pow2 (n1+n2) ==
(a1 * pow2 s) % pow2 (n1+n2)) =
shift_left_large_val a1 a2 s;
mod_add (a1 * pow2 s) (a2 * pow2 (n1+s)) (pow2 (n1+n2));
shift_past_mod a2 (n1+n2) (n1+s);
mod_double (a1 * pow2 s) (pow2 (n1+n2));
()
#pop-options
val shift_left_large_lemma_t : a:t -> s:nat ->
Lemma (requires (s >= 64))
(ensures ((v a * pow2 s) % pow2 128 ==
(U64.v a.low * pow2 s) % pow2 128))
let shift_left_large_lemma_t a s =
shift_left_large_lemma #64 #64 (U64.v a.low) (U64.v a.high) s
private let u32_64: n:U32.t{U32.v n == 64} = U32.uint_to_t 64
val div_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} -> Lemma
(requires True)
(ensures (a / pow2 (n1 - n2) == a * pow2 n2 / pow2 n1))
let div_pow2_diff a n1 n2 =
Math.pow2_plus n2 (n1-n2);
assert (a * pow2 n2 / pow2 n1 == a * pow2 n2 / (pow2 n2 * pow2 (n1 - n2)));
div_product (a * pow2 n2) (pow2 n2) (pow2 (n1-n2));
mul_div_cancel a (pow2 n2)
val mod_mul_pow2 : n:nat -> e1:nat -> e2:nat ->
Lemma (n % pow2 e1 * pow2 e2 <= pow2 (e1+e2) - pow2 e2)
let mod_mul_pow2 n e1 e2 =
Math.lemma_mod_lt n (pow2 e1);
Math.lemma_mult_le_right (pow2 e2) (n % pow2 e1) (pow2 e1 - 1);
assert (n % pow2 e1 * pow2 e2 <= pow2 e1 * pow2 e2 - pow2 e2);
Math.pow2_plus e1 e2
let pow2_div_bound #b (n:UInt.uint_t b) (s:nat{s <= b}) :
Lemma (n / pow2 s < pow2 (b - s)) =
Math.lemma_div_lt n b s
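(* Concrete instance with illustrative numbers, for b = 8: shifting the
   8-bit value 200 right by s = 4 leaves 200 / pow2 4 = 12, which is below
   pow2 (8 - 4) = 16. *)
let _ = assert_norm (200 / pow2 4 < pow2 (8 - 4))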
#push-options "--smtencoding.l_arith_repr native --z3rlimit 40"
let add_u64_shift_left (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r = (U64.v hi * pow2 (U32.v s)) % pow2 64 + U64.v lo / pow2 (64 - U32.v s))) =
let high = U64.shift_left hi s in
let low = U64.shift_right lo (U32.sub u32_64 s) in
let s = U32.v s in
let high_n = U64.v hi % pow2 (64 - s) * pow2 s in
let low_n = U64.v lo / pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 s) (pow2 (64-s));
assert (U64.v high == high_n);
assert (U64.v low == low_n);
pow2_div_bound (U64.v lo) (64-s);
assert (low_n < pow2 s);
mod_mul_pow2 (U64.v hi) (64 - s) s;
U64.add high low
#pop-options
let div_plus_multiple (a:nat) (b:nat) (k:pos) :
Lemma (requires (a < k))
(ensures ((a + b * k) / k == b)) =
Math.division_addition_lemma a k b;
Math.small_division_lemma_1 a k
val div_add_small: n:nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (k1*m / (k1*k2) == (n + k1*m) / (k1*k2)))
let div_add_small n m k1 k2 =
div_product (k1*m) k1 k2;
div_product (n+k1*m) k1 k2;
mul_div_cancel m k1;
assert (k1*m/k1 == m);
div_plus_multiple n m k1
val add_mod_small: n: nat -> m:nat -> k1:pos -> k2:pos ->
Lemma (requires (n < k1))
(ensures (n + (k1 * m) % (k1 * k2) ==
(n + k1 * m) % (k1 * k2)))
let add_mod_small n m k1 k2 =
mod_spec (k1 * m) (k1 * k2);
mod_spec (n + k1 * m) (k1 * k2);
div_add_small n m k1 k2
let mod_then_mul_64 (n:nat) : Lemma (n % pow2 64 * pow2 64 == n * pow2 64 % pow2 128) =
Math.pow2_plus 64 64;
mod_mul n (pow2 64) (pow2 64)
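(* The same identity at a smaller scale, with illustrative 4-bit words:
   (19 % pow2 4) * pow2 4 = 3 * 16 = 48 and (19 * pow2 4) % pow2 8 =
   304 % 256 = 48. *)
let _ = assert_norm ((19 % pow2 4) * pow2 4 == (19 * pow2 4) % pow2 8)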
let mul_abc_to_acb (a b c: int) : Lemma (a * b * c == a * c * b) = ()
let add_u64_shift_left_respec (hi lo:U64.t) (s:U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r ->
U64.v r * pow2 64 ==
(U64.v hi * pow2 64) * pow2 (U32.v s) % pow2 128 +
U64.v lo * pow2 (U32.v s) / pow2 64 * pow2 64)) =
let r = add_u64_shift_left hi lo s in
let hi = U64.v hi in
let lo = U64.v lo in
let s = U32.v s in
// spec of add_u64_shift_left
assert (U64.v r == hi * pow2 s % pow2 64 + lo / pow2 (64 - s));
Math.distributivity_add_left (hi * pow2 s % pow2 64) (lo / pow2 (64-s)) (pow2 64);
mod_then_mul_64 (hi * pow2 s);
assert (hi * pow2 s % pow2 64 * pow2 64 == (hi * pow2 s * pow2 64) % pow2 128);
div_pow2_diff lo 64 s;
assert (lo / pow2 (64-s) == lo * pow2 s / pow2 64);
assert (U64.v r * pow2 64 == hi * pow2 s * pow2 64 % pow2 128 + lo * pow2 s / pow2 64 * pow2 64);
mul_abc_to_acb hi (pow2 s) (pow2 64);
r
val add_mod_small' : n:nat -> m:nat -> k:pos ->
Lemma (requires (n + m % k < k))
(ensures (n + m % k == (n + m) % k))
let add_mod_small' n m k =
Math.lemma_mod_lt (n + m % k) k;
Math.modulo_lemma n k;
mod_add n m k
#push-options "--retry 5"
let shift_t_val (a: t) (s: nat) :
Lemma (v a * pow2 s == U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s)) =
Math.pow2_plus 64 s;
()
#pop-options
val mul_mod_bound : n:nat -> s1:nat -> s2:nat{s2>=s1} ->
Lemma (n * pow2 s1 % pow2 s2 <= pow2 s2 - pow2 s1)
#push-options "--retry 5"
let mul_mod_bound n s1 s2 =
// n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1
// n % pow2 (s2-s1) <= pow2 (s2-s1) - 1
// n % pow2 (s2-s1) * pow2 s1 <= pow2 s2 - pow2 s1
mod_mul n (pow2 s1) (pow2 (s2-s1));
// assert (n * pow2 s1 % pow2 s2 == n % pow2 (s2-s1) * pow2 s1);
Math.lemma_mod_lt n (pow2 (s2-s1));
Math.lemma_mult_le_right (pow2 s1) (n % pow2 (s2-s1)) (pow2 (s2-s1) - 1);
Math.pow2_plus (s2-s1) s1
#pop-options
let add_lt_le (a a' b b': int) :
Lemma (requires (a < a' /\ b <= b'))
(ensures (a + b < a' + b')) = ()
let u64_pow2_bound (a: UInt.uint_t 64) (s: nat) :
Lemma (a * pow2 s < pow2 (64+s)) =
Math.pow2_plus 64 s;
Math.lemma_mult_le_right (pow2 s) a (pow2 64)
let shift_t_mod_val' (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + U64.v a.high * pow2 (64+s) % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
u64_pow2_bound a_l s;
mul_mod_bound a_h (64+s) 128;
// assert (a_h * pow2 (64+s) % pow2 128 <= pow2 128 - pow2 (64+s));
add_lt_le (a_l * pow2 s) (pow2 (64+s)) (a_h * pow2 (64+s) % pow2 128) (pow2 128 - pow2 (64+s));
add_mod_small' (a_l * pow2 s) (a_h * pow2 (64+s)) (pow2 128);
shift_t_val a s;
()
let shift_t_mod_val (a: t) (s: nat{s < 64}) :
Lemma ((v a * pow2 s) % pow2 128 ==
U64.v a.low * pow2 s + (U64.v a.high * pow2 64) * pow2 s % pow2 128) =
let a_l = U64.v a.low in
let a_h = U64.v a.high in
shift_t_mod_val' a s;
Math.pow2_plus 64 s;
Math.paren_mul_right a_h (pow2 64) (pow2 s);
()
#push-options "--z3rlimit 20"
let shift_left_small (a: t) (s: U32.t) : Pure t
(requires (U32.v s < 64))
(ensures (fun r -> v r = (v a * pow2 (U32.v s)) % pow2 128)) =
if U32.eq s 0ul then a
else
let r = { low = U64.shift_left a.low s;
high = add_u64_shift_left_respec a.high a.low s; } in
let s = U32.v s in
let a_l = U64.v a.low in
let a_h = U64.v a.high in
mod_spec_rew_n (a_l * pow2 s) (pow2 64);
shift_t_mod_val a s;
r
#pop-options
val shift_left_large : a:t -> s:U32.t{U32.v s >= 64 /\ U32.v s < 128} ->
r:t{v r = (v a * pow2 (U32.v s)) % pow2 128}
#push-options "--z3rlimit 50 --retry 5" // sporadically fails
let shift_left_large a s =
let h_shift = U32.sub s u32_64 in
assert (U32.v h_shift < 64);
let r = { low = U64.uint_to_t 0;
high = U64.shift_left a.low h_shift; } in
assert (U64.v r.high == (U64.v a.low * pow2 (U32.v s - 64)) % pow2 64);
mod_mul (U64.v a.low * pow2 (U32.v s - 64)) (pow2 64) (pow2 64);
Math.pow2_plus (U32.v s - 64) 64;
assert (U64.v r.high * pow2 64 == (U64.v a.low * pow2 (U32.v s)) % pow2 128);
shift_left_large_lemma_t a (U32.v s);
r
#pop-options
let shift_left a s =
if (U32.lt s u32_64) then shift_left_small a s
else shift_left_large a s
let add_u64_shift_right (hi lo: U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 (64 - U32.v s) % pow2 64)) =
let low = U64.shift_right lo s in
let high = U64.shift_left hi (U32.sub u32_64 s) in
let s = U32.v s in
let low_n = U64.v lo / pow2 s in
let high_n = U64.v hi % pow2 s * pow2 (64 - s) in
Math.pow2_plus (64-s) s;
mod_mul (U64.v hi) (pow2 (64-s)) (pow2 s);
assert (U64.v high == high_n);
pow2_div_bound (U64.v lo) s;
assert (low_n < pow2 (64 - s));
mod_mul_pow2 (U64.v hi) s (64 - s);
U64.add low high
val mul_pow2_diff: a:nat -> n1:nat -> n2:nat{n2 <= n1} ->
Lemma (a * pow2 (n1 - n2) == a * pow2 n1 / pow2 n2)
let mul_pow2_diff a n1 n2 =
Math.paren_mul_right a (pow2 (n1-n2)) (pow2 n2);
mul_div_cancel (a * pow2 (n1 - n2)) (pow2 n2);
Math.pow2_plus (n1 - n2) n2
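(* Concrete instance with illustrative numbers: 5 * pow2 (6 - 2) = 80 and
   (5 * pow2 6) / pow2 2 = 320 / 4 = 80. *)
let _ = assert_norm (5 * pow2 (6 - 2) == (5 * pow2 6) / pow2 2)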
let add_u64_shift_right_respec (hi lo:U64.t) (s: U32.t{U32.v s < 64}) : Pure U64.t
(requires (U32.v s <> 0))
(ensures (fun r -> U64.v r == U64.v lo / pow2 (U32.v s) +
U64.v hi * pow2 64 / pow2 (U32.v s) % pow2 64)) =
let r = add_u64_shift_right hi lo s in
let s = U32.v s in
mul_pow2_diff (U64.v hi) 64 s;
r
let mul_div_spec (n:nat) (k:pos) : Lemma (n / k * k == n - n % k) = ()
let mul_distr_sub (n1 n2:nat) (k:nat) : Lemma ((n1 - n2) * k == n1 * k - n2 * k) = ()
val div_product_comm : n1:nat -> k1:pos -> k2:pos ->
Lemma (n1 / k1 / k2 == n1 / k2 / k1)
let div_product_comm n1 k1 k2 =
div_product n1 k1 k2;
div_product n1 k2 k1
val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64) | false | false | FStar.UInt128.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_right_reconstruct : a_h:UInt.uint_t 64 -> s:nat{s < 64} ->
Lemma (a_h * pow2 (64-s) == a_h / pow2 s * pow2 64 + a_h * pow2 64 / pow2 s % pow2 64) | [] | FStar.UInt128.shift_right_reconstruct | {
"file_name": "ulib/FStar.UInt128.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a_h: FStar.UInt.uint_t 64 -> s: Prims.nat{s < 64}
-> FStar.Pervasives.Lemma
(ensures
a_h * Prims.pow2 (64 - s) ==
(a_h / Prims.pow2 s) * Prims.pow2 64 + a_h * Prims.pow2 64 / Prims.pow2 s % Prims.pow2 64) | {
"end_col": 4,
"end_line": 764,
"start_col": 2,
"start_line": 759
} |