file_name (stringlengths 5–52) | name (stringlengths 4–95) | original_source_type (stringlengths 0–23k) | source_type (stringlengths 9–23k) | source_definition (stringlengths 9–57.9k) | source (dict) | source_range (dict) | file_context (stringlengths 0–721k) | dependencies (dict) | opens_and_abbrevs (listlengths 2–94) | vconfig (dict) | interleaved (bool, 1 class) | verbose_type (stringlengths 1–7.42k) | effect (stringclasses, 118 values) | effect_flags (sequencelengths 0–2) | mutual_with (sequencelengths 0–11) | ideal_premises (sequencelengths 0–236) | proof_features (sequencelengths 0–1) | is_simple_lemma (bool, 2 classes) | is_div (bool, 2 classes) | is_proof (bool, 2 classes) | is_simply_typed (bool, 2 classes) | is_type (bool, 2 classes) | partial_definition (stringlengths 5–3.99k) | completed_definiton (stringlengths 1–1.63M) | isa_cross_project_example (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_opr128 | val va_is_dst_opr128 : o: Vale.X64.Machine_s.operand128 -> s: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 90,
"end_line": 245,
"start_col": 19,
"start_line": 245
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand128 -> s: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand128",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.valid_operand128",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_is_dst_opr128 (o: operand128) (s: va_state) =
| valid_operand128 o s | false |
|
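The row above records the predicate va_is_dst_opr128, which simply delegates to valid_operand128. As a minimal illustrative sketch (not part of the dataset, with the hypothetical name example_requires_dst128, and assuming Vale.X64.Decls and the modules it opens are in scope), a precondition requiring a 128-bit destination operand to be valid in the current state could be stated like this:

// Illustrative sketch only, not a dataset row; assumes Vale.X64.Decls is open.
// States that the 128-bit destination operand dst is valid in state s;
// by definition this unfolds to valid_operand128 dst s.
let example_requires_dst128 (dst:operand128) (s:va_state) : prop0 =
  va_is_dst_opr128 dst s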
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_src_shift_amt64 | val va_is_src_shift_amt64 : o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 125,
"end_line": 239,
"start_col": 19,
"start_line": 239
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Prims.l_and",
"Vale.X64.Decls.valid_operand",
"Prims.b2t",
"Prims.op_LessThan",
"Vale.X64.Decls.va_eval_shift_amt64",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_src_shift_amt64 (o: operand64) (s: va_state) =
| valid_operand o s /\ (va_eval_shift_amt64 s o) < 64 | false |
|
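The row above covers va_is_src_shift_amt64, which conjoins operand validity with the bound va_eval_shift_amt64 s o < 64. A small hedged sketch (hypothetical, not part of the dataset, assuming the Vale.X64.Decls namespace is open) of how the predicate is applied to a constant shift amount:

// Illustrative sketch only, not a dataset row; assumes Vale.X64.Decls is open.
// Forms the proposition that the constant shift operand 5 is a valid shift
// amount in s; since va_const_shift_amt64 builds an OConst, va_eval_shift_amt64
// evaluates to the constant itself, so the "< 64" conjunct is about 5 < 64.
let example_shift_amt_ok (s:va_state) : Prims.logical =
  va_is_src_shift_amt64 (va_const_shift_amt64 5) s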
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_dst_opr64 | val va_is_dst_dst_opr64 : o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 91,
"end_line": 238,
"start_col": 19,
"start_line": 238
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_is_dst_opr64",
"Prims.bool"
] | [] | false | false | false | true | false | let va_is_dst_dst_opr64 (o: operand64) (s: va_state) =
| va_is_dst_opr64 o s | false |
|
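The row above is va_is_dst_dst_opr64, an unfold alias for va_is_dst_opr64, which accepts exactly the register operands other than rRsp. A brief hypothetical sketch (not part of the dataset, assuming Vale.X64.Decls and Vale.X64.Machine_s are open) showing both sides of that check:

// Illustrative sketch only, not a dataset row; assumes Vale.X64.Decls is open.
// va_is_dst_dst_opr64 unfolds to va_is_dst_opr64: true only for OReg r with r <> rRsp.
let example_reg_dst (s:va_state) : bool =
  va_is_dst_dst_opr64 (va_op_dst_opr64_reg64 rRax) s   // OReg rRax: accepted
let example_const_dst (s:va_state) : bool =
  va_is_dst_dst_opr64 (va_const_opr64 0) s             // OConst: rejected (false)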
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_xmm | val va_is_dst_xmm : x: Vale.X64.Machine_s.reg_xmm -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 68,
"end_line": 243,
"start_col": 19,
"start_line": 243
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
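// Informal example of how these operand constructors reduce: va_opr_code_Mem64 h (OReg rRbx) 8 Secret
// normalizes to OMem (MReg (Reg 0 rRbx) 8, Secret); the heaplet argument h only directs the proof
// and never changes the operand itself. The OMem (MConst 42, t) fallback is a placeholder for
// operand shapes that are not expected to reach these constructors.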
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
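// va_get_* project individual fields out of the Vale state; the va_upd_* functions below
// are the matching pure record updates. Together they give the field-by-field view of the
// state that Vale procedure specifications are written against.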
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
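// The va_eval_* functions are thin (mostly GTot) wrappers around eval_operand,
// eval_reg_xmm and eval_operand128, one per operand type, so that specifications can
// refer to the value of any operand uniformly.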
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o)) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Vale.X64.Machine_s.reg_xmm -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_xmm",
"Vale.X64.Decls.va_state",
"Prims.l_True",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_dst_xmm (x: reg_xmm) (s: va_state) =
| True | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_eval_opr128 | val va_eval_opr128 (s: va_state) (o: operand128) : GTot quad32 | val va_eval_opr128 (s: va_state) (o: operand128) : GTot quad32 | let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 108,
"end_line": 232,
"start_col": 19,
"start_line": 232
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | s: Vale.X64.Decls.va_state -> o: Vale.X64.Machine_s.operand128 -> Prims.GTot Vale.X64.Decls.quad32 | Prims.GTot | [
"sometrivial"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Machine_s.operand128",
"Vale.X64.State.eval_operand128",
"Vale.X64.Decls.quad32"
] | [] | false | false | false | false | false | let va_eval_opr128 (s: va_state) (o: operand128) : GTot quad32 =
| eval_operand128 o s | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_src_xmm | val va_is_src_xmm : x: Vale.X64.Machine_s.reg_xmm -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_src_xmm (x:reg_xmm) (s:va_state) = True | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 68,
"end_line": 242,
"start_col": 19,
"start_line": 242
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Vale.X64.Machine_s.reg_xmm -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_xmm",
"Vale.X64.Decls.va_state",
"Prims.l_True",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_src_xmm (x: reg_xmm) (s: va_state) =
| True | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.update_register | val update_register (r: reg) (sM sK: va_state) : va_state | val update_register (r: reg) (sM sK: va_state) : va_state | let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 35,
"end_line": 253,
"start_col": 19,
"start_line": 252
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Vale.X64.Machine_s.reg -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.upd_register",
"Vale.X64.State.eval_reg"
] | [] | false | false | false | true | false | let update_register (r: reg) (sM sK: va_state) : va_state =
| upd_register r (eval_reg r sM) sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_src_opr128 | val va_is_src_opr128 : o: Vale.X64.Machine_s.operand128 -> s: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 90,
"end_line": 244,
"start_col": 19,
"start_line": 244
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand128 -> s: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand128",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.valid_operand128",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_is_src_opr128 (o: operand128) (s: va_state) =
| valid_operand128 o s | false |
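
Illustrative F* sketch (hypothetical, not part of the record above): va_is_src_opr128 is the validity precondition a 128-bit source operand must satisfy before it is read with va_eval_opr128; the helper name read_opr128 is invented for illustration.

// Hypothetical helper: evaluating a 128-bit operand under the va_is_src_opr128 precondition.
let read_opr128 (o:operand128) (s:va_state{va_is_src_opr128 o s}) : GTot quad32 =
  va_eval_opr128 s o
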
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_heaplet | val va_is_dst_heaplet : h: Vale.X64.Decls.heaplet_id -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 75,
"end_line": 247,
"start_col": 19,
"start_line": 247
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | h: Vale.X64.Decls.heaplet_id -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.heaplet_id",
"Vale.X64.Decls.va_state",
"Prims.l_True",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_dst_heaplet (h: heaplet_id) (s: va_state) =
| True | false |
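
Illustrative F* sketch (hypothetical, not part of the record above): heaplet operands carry no validity obligation, so va_is_dst_heaplet (like va_is_src_heaplet) reduces to True and is trivially provable.

// Hypothetical lemma: the destination-heaplet predicate always holds.
let heaplet_always_dst (h:heaplet_id) (s:va_state) : Lemma (va_is_dst_heaplet h s) = ()
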
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_flags | val va_update_flags (sM sK: va_state) : va_state | val va_update_flags (sM sK: va_state) : va_state | let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 107,
"end_line": 251,
"start_col": 19,
"start_line": 251
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_flags",
"Vale.X64.State.__proj__Mkvale_state__item__vs_flags"
] | [] | false | false | false | true | false | let va_update_flags (sM sK: va_state) : va_state =
| va_upd_flags sM.vs_flags sK | false |
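
Illustrative F* sketch (hypothetical, not part of the record above): va_update_flags realizes the framing convention noted in the file context — the result agrees with the frame state sK everywhere except the flags, which are taken from sM. The helper name frame_flags is invented for illustration.

// Hypothetical framing example: only vs_flags changes in the frame state sK.
let frame_flags (sM sK:va_state) : va_state =
  va_update_flags sM sK  // == { sK with vs_flags = sM.vs_flags }
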
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_xmm | val va_update_xmm (x: reg_xmm) (sM sK: va_state) : va_state | val va_update_xmm (x: reg_xmm) (sM sK: va_state) : va_state | let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 37,
"end_line": 257,
"start_col": 19,
"start_line": 256
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
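(* Editor's note -- illustrative comment only, not part of the original Vale source.
   With a register base and a literal offset these constructors elaborate to tainted memory
   operands; assuming the usual rRbx / Secret names from Machine_s and HeapTypes_s,
     va_opr_code_Mem64 h (OReg rRbx) 8 Secret == OMem (MReg (Reg 0 rRbx) 8, Secret)
   and the MConst 42 fallback branch is only a placeholder for base operands of the wrong shape. *)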
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
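(* Editor's note -- illustrative comment only, not part of the original Vale source.
   Each va_update_foo copies the foo component of sM into sK and leaves the rest of sK alone;
   unfolding the definition above gives, for example,
     va_update_flags sM sK == { sK with vs_flags = sM.vs_flags }
   so a chain of va_update_* calls states that the final state agrees with sK except on the
   listed components. *)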
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Vale.X64.Machine_s.reg_xmm -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_xmm",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_xmm",
"Vale.X64.State.eval_reg_xmm"
] | [] | false | false | false | true | false | let va_update_xmm (x: reg_xmm) (sM sK: va_state) : va_state =
| va_upd_xmm x (eval_reg_xmm x sM) sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_mem_layout | val va_update_mem_layout (sM sK: va_state) : va_state | val va_update_mem_layout (sM sK: va_state) : va_state | let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 126,
"end_line": 259,
"start_col": 19,
"start_line": 259
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_mem_layout",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_layout",
"Vale.X64.State.__proj__Mkvale_state__item__vs_heap"
] | [] | false | false | false | true | false | let va_update_mem_layout (sM sK: va_state) : va_state =
| va_upd_mem_layout sM.vs_heap.vf_layout sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_mem_heaplet | val va_update_mem_heaplet (n: heaplet_id) (sM sK: va_state) : va_state | val va_update_mem_heaplet (n: heaplet_id) (sM sK: va_state) : va_state | let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 62,
"end_line": 261,
"start_col": 19,
"start_line": 260
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | n: Vale.X64.Decls.heaplet_id -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.heaplet_id",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_mem_heaplet",
"Vale.Lib.Map16.sel",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_heaplets",
"Vale.X64.State.__proj__Mkvale_state__item__vs_heap"
] | [] | false | false | false | true | false | let va_update_mem_heaplet (n: heaplet_id) (sM sK: va_state) : va_state =
| va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_src_heaplet | val va_is_src_heaplet : h: Vale.X64.Decls.heaplet_id -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 75,
"end_line": 246,
"start_col": 19,
"start_line": 246
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | h: Vale.X64.Decls.heaplet_id -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.heaplet_id",
"Vale.X64.Decls.va_state",
"Prims.l_True",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_src_heaplet (h: heaplet_id) (s: va_state) =
| True | false |
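For reference, the row above completes its partial definition to the trivial predicate True; the corresponding heaplet predicates appear later in Vale.X64.Decls.fsti with no constraint placed on the state:
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True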
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_operand_dst_opr64 | val va_update_operand_dst_opr64 (o: operand64) (sM sK: va_state) : va_state | val va_update_operand_dst_opr64 (o: operand64) (sM sK: va_state) : va_state | let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 28,
"end_line": 279,
"start_col": 0,
"start_line": 278
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.update_dst_operand"
] | [] | false | false | false | true | false | let va_update_operand_dst_opr64 (o: operand64) (sM sK: va_state) : va_state =
| update_dst_operand o sM sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_mem | val va_update_mem (sM sK: va_state) : va_state | val va_update_mem (sM sK: va_state) : va_state | let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 110,
"end_line": 258,
"start_col": 19,
"start_line": 258
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_mem",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_heap",
"Vale.X64.State.__proj__Mkvale_state__item__vs_heap"
] | [] | false | false | false | true | false | let va_update_mem (sM sK: va_state) : va_state =
| va_upd_mem sM.vs_heap.vf_heap sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_operand_heaplet | val va_update_operand_heaplet (h: heaplet_id) (sM sK: va_state) : va_state | val va_update_operand_heaplet (h: heaplet_id) (sM sK: va_state) : va_state | let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 31,
"end_line": 295,
"start_col": 0,
"start_line": 294
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | h: Vale.X64.Decls.heaplet_id -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.heaplet_id",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_update_mem_heaplet"
] | [] | false | false | false | true | false | let va_update_operand_heaplet (h: heaplet_id) (sM sK: va_state) : va_state =
| va_update_mem_heaplet h sM sK | false |
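Read back together, the partial_definition and completed_definiton cells of this row reassemble into the following declaration from Vale.X64.Decls.fsti; this is a readability sketch only, and the [@va_qattr] unfold attributes that precede the neighbouring va_update_* helpers in the file context are omitted here:

let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
  // frame a heaplet operand: take heaplet h from sM, keep every other component of sK
  va_update_mem_heaplet h sM sK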
Vale.X64.Decls.fsti | Vale.X64.Decls.update_dst_operand | val update_dst_operand (o: operand64) (sM sK: va_state) : va_state | val update_dst_operand (o: operand64) (sM sK: va_state) : va_state | let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 24,
"end_line": 275,
"start_col": 0,
"start_line": 274
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.update_operand"
] | [] | false | false | false | true | false | let update_dst_operand (o: operand64) (sM sK: va_state) : va_state =
| update_operand o sM sK | false |
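The source_definition recorded in this row (placed at lines 274-275 of the interface by its source_range) reads as follows; attributes are omitted in this sketch:

let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
  // frame a destination operand: update_operand dispatches on o and copies the affected
  // register, heap, or stack component from sM into sK (constants leave sK unchanged)
  update_operand o sM sK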
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_ok | val va_update_ok (sM sK: va_state) : va_state | val va_update_ok (sM sK: va_state) : va_state | let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 98,
"end_line": 250,
"start_col": 19,
"start_line": 250
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_ok",
"Vale.X64.State.__proj__Mkvale_state__item__vs_ok"
] | [] | false | false | false | true | false | let va_update_ok (sM sK: va_state) : va_state =
| va_upd_ok sM.vs_ok sK | false |
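This row's source_definition is a one-liner; restated as a sketch with a comment:

let va_update_ok (sM:va_state) (sK:va_state) : va_state =
  // copy the vs_ok flag of sM into sK, leaving the rest of sK unchanged
  va_upd_ok sM.vs_ok sK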
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_reg64 | val va_update_reg64 (r: reg_64) (sM sK: va_state) : va_state | val va_update_reg64 (r: reg_64) (sM sK: va_state) : va_state | let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 38,
"end_line": 255,
"start_col": 19,
"start_line": 254
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Vale.X64.Machine_s.reg_64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_reg64",
"Vale.X64.State.eval_reg_64"
] | [] | false | false | false | true | false | let va_update_reg64 (r: reg_64) (sM sK: va_state) : va_state =
| va_upd_reg64 r (eval_reg_64 r sM) sK | false |
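Reassembled from this row's partial_definition and completed_definiton cells (attributes omitted), the declaration is:

let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
  // copy the 64-bit register r from sM into sK, via eval_reg_64 and va_upd_reg64
  va_upd_reg64 r (eval_reg_64 r sM) sK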
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_operand_reg_opr64 | val va_update_operand_reg_opr64 (o: operand64) (sM sK: va_state) : va_state | val va_update_operand_reg_opr64 (o: operand64) (sM sK: va_state) : va_state | let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 28,
"end_line": 287,
"start_col": 0,
"start_line": 286
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
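(* Note: an operand address is valid when it lands on slot `index` of some buffer
   with the expected taint: addr == buffer_addr b + 8 * index for 64-bit operands
   and addr == buffer_addr b + 16 * index for 128-bit operands, per the
   valid_buf_maddr64/128 definitions above; the existentials trigger on
   M.valid_buffer_read. *)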
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
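// Note: register and constant operands only need the basic valid_src_operand check;
// OMem and OStack operands additionally require a taint-correct address, via
// valid_mem_operand64/128 or S.valid_taint_stack64/128.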
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
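// Note: the catch-all arms above fall back to a dummy address (MConst 42); this
// appears to be a placeholder for operand shapes the Vale front end does not
// generate for Mem64, Stack, or Mem128 operands.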
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
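(* Note: each va_get_* getter above has a matching va_upd_* updater that rewrites
   exactly one component of the state record; the framing combinators further below
   are built from these updaters. *)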
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
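(* Note: va_update_X sM sK copies component X from sM into sK, so a nested call such
   as va_update_flags sM (va_update_reg64 rRax sM sK) (an illustrative example, not
   taken from this file) denotes a state equal to sK except that the flags and rRax
   come from sM. *)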
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
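// Note: updating through an operand frames only what a write to it can touch: a
// register operand frames that single register, OMem frames the heap, OStack frames
// the stack, and a constant frames nothing (sK is returned unchanged).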
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.update_dst_operand"
] | [] | false | false | false | true | false | let va_update_operand_reg_opr64 (o: operand64) (sM sK: va_state) : va_state =
| update_dst_operand o sM sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_stack | val va_update_stack (sM sK: va_state) : va_state | val va_update_stack (sM sK: va_state) : va_state | let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 107,
"end_line": 262,
"start_col": 19,
"start_line": 262
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_stack",
"Vale.X64.State.__proj__Mkvale_state__item__vs_stack"
] | [] | false | false | false | true | false | let va_update_stack (sM sK: va_state) : va_state =
| va_upd_stack sM.vs_stack sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_value_reg_opr64 | val va_value_reg_opr64 : Type0 | let va_value_reg_opr64 = nat64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 37,
"end_line": 299,
"start_col": 7,
"start_line": 299
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type0 | Prims.Tot | [
"total"
] | [] | [
"Vale.Def.Types_s.nat64"
] | [] | false | false | false | true | true | let va_value_reg_opr64 =
| nat64 | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_stackTaint | val va_update_stackTaint (sM sK: va_state) : va_state | val va_update_stackTaint (sM sK: va_state) : va_state | let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 122,
"end_line": 263,
"start_col": 19,
"start_line": 263
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
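(* Illustrative sketch, not part of the original interface; example_reg_operand is a
   hypothetical name. A plain register operand like this satisfies valid_operand
   whenever Vale.X64.State.valid_src_operand holds, since the OMem/OStack taint
   clauses above only constrain memory and stack operands. *)
let example_reg_operand : operand64 = OReg rRax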
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
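(* Illustrative sketch, not part of the original interface (hypothetical name):
   the operand that a Vale source expression such as Mem64(heap, rax, 8, Secret)
   would elaborate to, i.e. the 64-bit word at address rax + 8 with Secret taint.
   Note that va_opr_code_Mem64 ignores its heaplet argument when building the
   operand; the heaplet only matters for the caller's proof obligations. *)
let example_mem64_operand (h:heaplet_id) : operand64 =
  va_opr_code_Mem64 h (va_op_opr64_reg64 rRax) 8 Secret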
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
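(* Illustrative sketch, not part of the original interface (hypothetical name):
   combining a getter and an updater to build a new state in which rbx holds the
   value read from rax, leaving every other component unchanged. *)
let example_copy_rax_to_rbx (s:va_state) : va_state =
  va_upd_reg64 rRbx (va_get_reg64 rRax s) s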
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
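(* Illustrative sketch, not part of the original interface (hypothetical name):
   evaluating a register operand; this should coincide with va_get_reg64 rRax s. *)
let example_eval_rax (s:va_state) : GTot nat64 = va_eval_opr64 s (va_op_opr64_reg64 rRax)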
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
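(* Illustrative sketch, not part of the original interface (hypothetical name):
   the destination-operand side condition for a register operand; only rRsp is
   excluded as a 64-bit destination, so for OReg rRbx this is always true. *)
let example_rbx_is_dst (s:va_state) : bool = va_is_dst_opr64 (OReg rRbx) s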
(* Framing: va_update_foo sM sK copies the foo component of sM into sK, so the result agrees with sK everywhere except foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state -> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_stackTaint",
"Vale.X64.State.__proj__Mkvale_state__item__vs_stackTaint"
] | [] | false | false | false | true | false | let va_update_stackTaint (sM sK: va_state) : va_state =
| va_upd_stackTaint sM.vs_stackTaint sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_value_opr64 | val va_value_opr64 : Type0 | let va_value_opr64 = nat64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 33,
"end_line": 297,
"start_col": 7,
"start_line": 297
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type0 | Prims.Tot | [
"total"
] | [] | [
"Vale.Def.Types_s.nat64"
] | [] | false | false | false | true | true | let va_value_opr64 =
| nat64 | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_value_xmm | val va_value_xmm : Prims.eqtype | let va_value_xmm = quad32 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 32,
"end_line": 300,
"start_col": 7,
"start_line": 300
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.eqtype | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.quad32"
] | [] | false | false | false | true | false | let va_value_xmm =
| quad32 | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_operand_opr64 | val va_update_operand_opr64 (o: operand64) (sM sK: va_state) : va_state | val va_update_operand_opr64 (o: operand64) (sM sK: va_state) : va_state | let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 28,
"end_line": 283,
"start_col": 0,
"start_line": 282
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.update_dst_operand"
] | [] | false | false | false | true | false | let va_update_operand_opr64 (o: operand64) (sM sK: va_state) : va_state =
| update_dst_operand o sM sK | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_value_dst_opr64 | val va_value_dst_opr64 : Type0 | let va_value_dst_opr64 = nat64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 37,
"end_line": 298,
"start_col": 7,
"start_line": 298
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type0 | Prims.Tot | [
"total"
] | [] | [
"Vale.Def.Types_s.nat64"
] | [] | false | false | false | true | true | let va_value_dst_opr64 =
| nat64 | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_upd_operand_xmm | val va_upd_operand_xmm (x: reg_xmm) (v: quad32) (s: vale_state) : vale_state | val va_upd_operand_xmm (x: reg_xmm) (v: quad32) (s: vale_state) : vale_state | let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 305,
"start_col": 0,
"start_line": 304
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Vale.X64.Machine_s.reg_xmm -> v: Vale.X64.Decls.quad32 -> s: Vale.X64.State.vale_state
-> Vale.X64.State.vale_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_xmm",
"Vale.X64.Decls.quad32",
"Vale.X64.State.vale_state",
"Vale.X64.State.update_reg_xmm"
] | [] | false | false | false | true | false | let va_upd_operand_xmm (x: reg_xmm) (v: quad32) (s: vale_state) : vale_state =
| update_reg_xmm x v s | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_value_heaplet | val va_value_heaplet : Type | let va_value_heaplet = vale_heap | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 39,
"end_line": 301,
"start_col": 7,
"start_line": 301
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap"
] | [] | false | false | false | true | true | let va_value_heaplet =
| vale_heap | false |
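
`va_value_heaplet` above is only an abbreviation for `vale_heap`, so any helper from the surrounding Vale.X64.Decls context that expects a heap applies to it unchanged. A minimal sketch, assuming the module context recorded in the row's file_context (its opens and the M = Vale.X64.Memory abbreviation); the helper name read_first_word is invented here for illustration:

// Illustration only: read word 0 of a 64-bit buffer out of a heaplet value,
// reusing buffer64_read exactly as declared in the context above.
let read_first_word (h:va_value_heaplet) (b:M.buffer64) : GTot nat64 =
  buffer64_read b 0 h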
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_update_operand_xmm | val va_update_operand_xmm (x: reg_xmm) (sM sK: va_state) : va_state | val va_update_operand_xmm (x: reg_xmm) (sM sK: va_state) : va_state | let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 41,
"end_line": 291,
"start_col": 0,
"start_line": 290
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Vale.X64.Machine_s.reg_xmm -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.reg_xmm",
"Vale.X64.Decls.va_state",
"Vale.X64.State.update_reg_xmm",
"Vale.X64.State.eval_reg_xmm"
] | [] | false | false | false | true | false | let va_update_operand_xmm (x: reg_xmm) (sM sK: va_state) : va_state =
| update_reg_xmm x (eval_reg_xmm x sM) sK | false |
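
va_update_operand_xmm x sM sK frames a single xmm register: the result agrees with sK everywhere except register x, which takes its value from sM. A minimal sketch, assuming the Vale.X64.Decls module context from the row's file_context; the name frame_xmm3 is invented for illustration (literals below 16 are valid reg_xmm values):

// Illustration only: copy xmm 3 from sM into sK, leaving the rest of sK intact.
let frame_xmm3 (sM sK:va_state) : va_state =
  va_update_operand_xmm 3 sM sK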
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_opr64 | val va_is_dst_opr64 : o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 112,
"end_line": 237,
"start_col": 12,
"start_line": 237
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Machine_s.reg_64",
"Prims.op_Negation",
"Prims.op_Equality",
"Vale.X64.Machine_s.rRsp",
"Vale.X64.Machine_s.operand",
"Vale.X64.Machine_s.nat64",
"Prims.bool"
] | [] | false | false | false | true | false | let va_is_dst_opr64 (o: operand64) (s: va_state) =
| match o with
| OReg r -> not (r = rRsp)
| _ -> false | false |
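
va_is_dst_opr64 accepts any register operand other than rRsp as a 64-bit destination and rejects constants and memory/stack operands, ignoring the state argument. A minimal sketch, assuming the Vale.X64.Decls module context from the row's file_context; the name dst_examples is invented for illustration, while va_op_dst_opr64_reg64, va_const_opr64 and rRax are constructors/registers already in scope there:

// Illustration only: rRax qualifies as a destination, an immediate never does.
let dst_examples (s:va_state) : bool =
  va_is_dst_opr64 (va_op_dst_opr64_reg64 rRax) s &&   // not (rRax = rRsp), i.e. true
  not (va_is_dst_opr64 (va_const_opr64 0) s)          // the wildcard branch gives false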
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_upd_operand_heaplet | val va_upd_operand_heaplet (h: heaplet_id) (v: vale_heap) (s: va_state) : va_state | val va_upd_operand_heaplet (h: heaplet_id) (v: vale_heap) (s: va_state) : va_state | let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 113,
"end_line": 324,
"start_col": 7,
"start_line": 324
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | h: Vale.X64.Decls.heaplet_id -> v: Vale.X64.Decls.vale_heap -> s: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.heaplet_id",
"Vale.X64.Decls.vale_heap",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_upd_mem_heaplet"
] | [] | false | false | false | true | false | let va_upd_operand_heaplet (h: heaplet_id) (v: vale_heap) (s: va_state) : va_state =
| va_upd_mem_heaplet h v s | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_dst_reg_opr64 | val va_is_dst_reg_opr64 : o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.logical | let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o)) | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 107,
"end_line": 241,
"start_col": 19,
"start_line": 241
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_OReg",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Prims.op_Negation",
"Prims.op_Equality",
"Vale.X64.Machine_s.rRsp",
"Vale.X64.Machine_s.__proj__OReg__item__r",
"Prims.logical"
] | [] | false | false | false | true | true | let va_is_dst_reg_opr64 (o: operand64) (s: va_state) =
| OReg? o /\ not (rRsp = (OReg?.r o)) | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_Block | val va_Block (block: va_codes) : va_code | val va_Block (block: va_codes) : va_code | let va_Block (block:va_codes) : va_code = Block block | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 60,
"end_line": 339,
"start_col": 7,
"start_line": 339
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | block: Vale.X64.Decls.va_codes -> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_codes",
"Vale.X64.Machine_s.Block",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_code"
] | [] | false | false | false | true | false | let va_Block (block: va_codes) : va_code =
| Block block | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_IfElse | val va_IfElse (ifCond: ocmp) (ifTrue ifFalse: va_code) : va_code | val va_IfElse (ifCond: ocmp) (ifTrue ifFalse: va_code) : va_code | let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 110,
"end_line": 340,
"start_col": 7,
"start_line": 340
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | ifCond: Vale.X64.Decls.ocmp -> ifTrue: Vale.X64.Decls.va_code -> ifFalse: Vale.X64.Decls.va_code
-> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_code",
"Vale.X64.Machine_s.IfElse",
"Vale.X64.Decls.ins"
] | [] | false | false | false | true | false | let va_IfElse (ifCond: ocmp) (ifTrue ifFalse: va_code) : va_code =
| IfElse ifCond ifTrue ifFalse | false |
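Illustrative sketch (editorial addition, not part of the dataset row above): assuming the Vale.X64.Decls interface reproduced in the file_context, the hypothetical client below builds a conditional va_code with va_IfElse and wraps it in a Block via the list constructors va_CCons/va_CNil; the names example_if and example_wrapped are invented for illustration.
(* Editor's sketch — hypothetical client code, not from Vale.X64.Decls.
   It uses only constructors shown in the surrounding file_context. *)
let example_if (cmp:ocmp) (thn els:va_code) : va_code =
  (* the result satisfies IfElse?, so va_get_ifCond / va_get_ifTrue apply to it *)
  va_IfElse cmp thn els
let example_wrapped (cmp:ocmp) (thn els:va_code) : va_code =
  (* wrap the conditional in a Block built from the va_codes constructors *)
  va_Block (va_CCons (va_IfElse cmp thn els) (va_CNil ()))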
Vale.X64.Decls.fsti | Vale.X64.Decls.va_CNil | val va_CNil: Prims.unit -> va_codes | val va_CNil: Prims.unit -> va_codes | let va_CNil () : va_codes = [] | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 49,
"end_line": 335,
"start_col": 19,
"start_line": 335
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= () | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.unit -> Vale.X64.Decls.va_codes | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Prims.Nil",
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_codes"
] | [] | false | false | false | true | false | let va_CNil () : va_codes =
| [] | false |
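Illustrative sketch (editorial addition, not part of the row above): since va_CNil () unfolds to [] and va_CCons hd tl unfolds to hd::tl, a va_codes list can be spelled either with the wrappers or with a list literal; the hypothetical helpers below show both forms producing the same value.
(* Editor's sketch — hypothetical, not from the source file. *)
let codes_via_wrappers (c1 c2:va_code) : va_codes = va_CCons c1 (va_CCons c2 (va_CNil ()))
let codes_via_literal  (c1 c2:va_code) : va_codes = [c1; c2]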
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_ifTrue | val va_get_ifTrue (c: va_code{IfElse? c}) : va_code | val va_get_ifTrue (c: va_code{IfElse? c}) : va_code | let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 76,
"end_line": 352,
"start_col": 7,
"start_line": 352
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{IfElse? c} -> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_IfElse",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__IfElse__item__ifTrue"
] | [] | false | false | false | false | false | let va_get_ifTrue (c: va_code{IfElse? c}) : va_code =
| IfElse?.ifTrue c | false |
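Illustrative sketch (editorial addition, not part of the row above): va_get_ifTrue requires its argument to satisfy the IfElse? refinement, so a caller holding an arbitrary va_code typically guards the projection with the discriminator; the helper below is hypothetical and only illustrates how the refinement is discharged.
(* Editor's sketch — hypothetical, not from the source file.
   In the then-branch, IfElse? c holds, which discharges the refinement
   on the argument of va_get_ifTrue. *)
let true_branch_or_self (c:va_code) : va_code =
  if IfElse? c then va_get_ifTrue c else c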
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_ifCond | val va_get_ifCond (c: va_code{IfElse? c}) : ocmp | val va_get_ifCond (c: va_code{IfElse? c}) : ocmp | let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 73,
"end_line": 351,
"start_col": 7,
"start_line": 351
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
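(* Editorial sketch: the va_opr_code_* constructors translate Vale surface syntax for
   memory accesses into operands.  For instance, va_opr_code_Mem64 h (OReg rRbx) 8 Public
   builds OMem (MReg (Reg 0 rRbx) 8, Public), i.e. a 64-bit access 8 bytes past the
   address held in rbx; the OMem (MConst 42, t) fallback appears to be a placeholder for
   operand shapes the Vale front end is not expected to produce. *)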
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
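(* Editorial sketch: the va_get_* / va_upd_* pairs are plain field reads and functional
   record updates on the state, so they compose in the expected way; for example,
   va_get_reg64 rRax (va_upd_reg64 rRax 5 s) is expected to evaluate to 5, and
   va_upd_flags f (va_upd_ok true s) differs from s only in vs_ok and vs_flags. *)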
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
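(* Editorial sketch: these va_is_src_* / va_is_dst_* predicates form the operand
   preconditions of Vale procedure signatures.  For example, va_is_dst_opr64 (OReg rRbx) s
   holds, while va_is_dst_opr64 (OReg rRsp) s and va_is_dst_opr64 (OConst 0) s are false,
   so the stack pointer and immediates are rejected as 64-bit destinations. *)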
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
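(* Editorial sketch of the framing convention stated above: va_update_reg64 rRax sM sK
   copies only the rax value of sM into sK, i.e. by definition
   va_update_reg64 rRax sM sK == va_upd_reg64 rRax (eval_reg_64 rRax sM) sK,
   and update_operand (OReg rRax) sM sK reduces to the same state.  Postconditions chain
   these updates to say that sM agrees with sK everywhere except the locations a
   procedure actually wrote. *)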
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
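(* Editorial sketch: Vale procedure bodies are quoted as va_code trees built from these
   constructors.  A hypothetical conditional running placeholder instruction codes i1 and
   i2 under test c would be represented as
     va_IfElse c (va_Block (va_CCons i1 (va_CCons i2 (va_CNil ())))) (va_Block (va_CNil ()))
   where c, i1 and i2 are illustrative names, not definitions from this interface. *)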
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{IfElse? c} -> Vale.X64.Decls.ocmp | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_IfElse",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__IfElse__item__ifCond"
] | [] | false | false | false | false | false | let va_get_ifCond (c: va_code{IfElse? c}) : ocmp =
| IfElse?.ifCond c | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_is_src_reg_opr64 | val va_is_src_reg_opr64 : o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 79,
"end_line": 240,
"start_col": 19,
"start_line": 240
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> s: Vale.X64.Decls.va_state -> Prims.bool | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Machine_s.uu___is_OReg",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Prims.bool"
] | [] | false | false | false | true | false | let va_is_src_reg_opr64 (o: operand64) (s: va_state) =
| OReg? o | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_While | val va_While (whileCond: ocmp) (whileBody: va_code) : va_code | val va_While (whileCond: ocmp) (whileBody: va_code) : va_code | let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 94,
"end_line": 341,
"start_col": 7,
"start_line": 341
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | whileCond: Vale.X64.Decls.ocmp -> whileBody: Vale.X64.Decls.va_code -> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_code",
"Vale.X64.Machine_s.While",
"Vale.X64.Decls.ins"
] | [] | false | false | false | true | false | let va_While (whileCond: ocmp) (whileBody: va_code) : va_code =
| While whileCond whileBody | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_whileCond | val va_get_whileCond (c: va_code{While? c}) : ocmp | val va_get_whileCond (c: va_code{While? c}) : ocmp | let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 77,
"end_line": 354,
"start_col": 7,
"start_line": 354
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{While? c} -> Vale.X64.Decls.ocmp | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_While",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__While__item__whileCond"
] | [] | false | false | false | false | false | let va_get_whileCond (c: va_code{While? c}) : ocmp =
| While?.whileCond c | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_CCons | val va_CCons (hd: va_code) (tl: va_codes) : va_codes | val va_CCons (hd: va_code) (tl: va_codes) : va_codes | let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 78,
"end_line": 336,
"start_col": 19,
"start_line": 336
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | hd: Vale.X64.Decls.va_code -> tl: Vale.X64.Decls.va_codes -> Vale.X64.Decls.va_codes | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_codes",
"Prims.Cons"
] | [] | false | false | false | true | false | let va_CCons (hd: va_code) (tl: va_codes) : va_codes =
| hd :: tl | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_block | val va_get_block (c: va_code{Block? c}) : va_codes | val va_get_block (c: va_code{Block? c}) : va_codes | let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 73,
"end_line": 350,
"start_col": 7,
"start_line": 350
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{Block? c} -> Vale.X64.Decls.va_codes | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_Block",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__Block__item__block",
"Vale.X64.Decls.va_codes"
] | [] | false | false | false | false | false | let va_get_block (c: va_code{Block? c}) : va_codes =
| Block?.block c | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_ifFalse | val va_get_ifFalse (c: va_code{IfElse? c}) : va_code | val va_get_ifFalse (c: va_code{IfElse? c}) : va_code | let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 78,
"end_line": 353,
"start_col": 7,
"start_line": 353
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{IfElse? c} -> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_IfElse",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__IfElse__item__ifFalse"
] | [] | false | false | false | false | false | let va_get_ifFalse (c: va_code{IfElse? c}) : va_code =
| IfElse?.ifFalse c | false |
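va_get_ifFalse is the counterpart of va_get_ifCond and va_get_ifTrue: each projects one field of the IfElse constructor, and the refinement IfElse? c makes the projection total. A small sketch of how the two branch projectors might be used together, assuming only the definitions shown above (the helper name is illustrative):

let example_select_branch (c:va_code{IfElse? c}) (branch_taken:bool) : va_code =
  // pick the code for whichever branch the condition selected
  if branch_taken then va_get_ifTrue c else va_get_ifFalse c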
Vale.X64.Decls.fsti | Vale.X64.Decls.va_get_whileBody | val va_get_whileBody (c: va_code{While? c}) : va_code | val va_get_whileBody (c: va_code{While? c}) : va_code | let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 80,
"end_line": 355,
"start_col": 7,
"start_line": 355
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c: Vale.X64.Decls.va_code{While? c} -> Vale.X64.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Prims.b2t",
"Vale.X64.Machine_s.uu___is_While",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Machine_s.__proj__While__item__whileBody"
] | [] | false | false | false | false | false | let va_get_whileBody (c: va_code{While? c}) : va_code =
| While?.whileBody c | false |
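va_get_whileBody projects the loop body out of a While node, with While? c guaranteeing the projection cannot fail. A hedged sketch of one way such a projector can be combined with the Block constructor shown above (the helper name and the unrolling idea are illustrative, not taken from the module):

let example_unroll_once (c:va_code{While? c}) : va_code =
  // one unrolling: run the body once, then fall back to the original loop
  Block [va_get_whileBody c; c]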
Vale.X64.Decls.fsti | Vale.X64.Decls.update_operand | val update_operand (o: operand64) (sM sK: va_state) : va_state | val update_operand (o: operand64) (sM sK: va_state) : va_state | let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 42,
"end_line": 271,
"start_col": 0,
"start_line": 266
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> sM: Vale.X64.Decls.va_state -> sK: Vale.X64.Decls.va_state
-> Vale.X64.Decls.va_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.X64.Decls.va_state",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Vale.X64.Decls.va_update_reg64",
"Vale.X64.Machine_s.maddr",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.va_update_mem",
"Vale.X64.Decls.va_update_stack"
] | [] | false | false | false | true | false | let update_operand (o: operand64) (sM sK: va_state) : va_state =
| match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK | false |
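Aside (editorial illustration, not part of the record above): the completed definition of update_operand dispatches purely on the operand's shape, so for a register destination it coincides definitionally with va_update_reg64, while memory and stack destinations fall back to copying the whole heap or stack from sM into sK. A minimal sketch of that reading, assuming the Vale.X64.Decls definitions above are in scope; the name example_frame_reg_dst is hypothetical:
(* Hypothetical example only -- not from the source file. *)
let example_frame_reg_dst (r:reg_64) (sM sK:va_state) : va_state =
  // For a register destination, update_operand reduces by definition to
  // va_update_reg64 r sM sK, i.e. sK with register r replaced by its value in sM.
  update_operand (OReg r) sM sK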
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer128_3 | val modifies_buffer128_3 : b1: Vale.X64.Memory.buffer128 ->
b2: Vale.X64.Memory.buffer128 ->
b3: Vale.X64.Memory.buffer128 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 96,
"end_line": 399,
"start_col": 7,
"start_line": 398
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b1: Vale.X64.Memory.buffer128 ->
b2: Vale.X64.Memory.buffer128 ->
b3: Vale.X64.Memory.buffer128 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.vale_heap",
"Vale.X64.Decls.modifies_mem",
"Vale.X64.Memory.loc_union",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let modifies_buffer128_3 (b1 b2 b3: M.buffer128) (h1 h2: vale_heap) =
| modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | false |
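Aside (editorial illustration, not part of the record above): modifies_buffer128_3 is just modifies_mem applied to the union of the three buffers' locations, so a typical frame condition pairs it with a disjointness assumption for any buffer whose contents must be preserved. A minimal sketch, assuming the Vale.X64.Decls definitions above are in scope; the name example_frame128 is hypothetical:
(* Hypothetical example only -- not from the source file. *)
let example_frame128 (b1 b2 b3 b4:M.buffer128) (h1 h2:vale_heap) : GTot prop0 =
  // h2 may differ from h1 only inside the footprint of b1, b2 and b3,
  // and b4 lies outside that footprint.
  modifies_buffer128_3 b1 b2 b3 h1 h2 /\
  locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3; loc_buffer b4]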
|
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer_3 | val modifies_buffer_3 : b1: Vale.X64.Memory.buffer64 ->
b2: Vale.X64.Memory.buffer64 ->
b3: Vale.X64.Memory.buffer64 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 96,
"end_line": 394,
"start_col": 7,
"start_line": 393
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b1: Vale.X64.Memory.buffer64 ->
b2: Vale.X64.Memory.buffer64 ->
b3: Vale.X64.Memory.buffer64 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer64",
"Vale.X64.Decls.vale_heap",
"Vale.X64.Decls.modifies_mem",
"Vale.X64.Memory.loc_union",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint64",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let modifies_buffer_3 (b1 b2 b3: M.buffer64) (h1 h2: vale_heap) =
| modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | false |
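Aside (editorial illustration, not part of the record above): the 64-bit variant follows the same pattern as its 128-bit counterpart, nesting loc_union over the three buffer locations, so spelling the frame out with modifies_mem directly yields the same proposition as modifies_buffer_3. A minimal sketch, assuming the Vale.X64.Decls definitions above are in scope; the name example_frame64 is hypothetical:
(* Hypothetical example only -- not from the source file. *)
let example_frame64 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) : GTot prop0 =
  // Unfolding modifies_buffer_3: the two heaps agree outside the union of the
  // three buffers' locations.
  modifies_mem (loc_union (loc_buffer b1) (loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2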
|
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer | val modifies_buffer : b: Vale.X64.Memory.buffer64 -> h1: Vale.X64.Decls.vale_heap -> h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 95,
"end_line": 390,
"start_col": 7,
"start_line": 390
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
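(* Illustrative note (not part of the original interface): valid_operand strengthens
   Vale.X64.State.valid_src_operand with taint-aware requirements: for an OMem operand
   the evaluated address must lie in some buffer whose taint matches t
   (valid_mem_operand64), and for OStack the stack taint map must agree
   (S.valid_taint_stack64); valid_operand128 is the 128-bit analogue. *)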
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
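(* Illustrative example (not part of the original interface): with a register base,
   va_opr_code_Mem64 h (OReg rRax) 8 Secret builds the memory operand
   OMem (MReg (Reg 0 rRax) 8, Secret); the OMem (MConst 42, t) arm is only a
   placeholder for base operands that are neither a constant nor a register. *)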
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
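(* Illustrative example (not part of the original interface): following the framing
   convention above, va_update_reg64 rRbx sM sK is the state sK with rRbx overwritten
   by its value in sM, which is how postconditions express "everything except rRbx is
   preserved". *)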
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
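(* Illustrative example (not part of the original interface): a two-instruction
   sequence c1; c2 is represented as va_Block (va_CCons c1 (va_CCons c2 (va_CNil ()))),
   and a conditional as va_IfElse cond ct cf. *)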
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
  | b :: l' -> buffer_readable h b /\ buffers_readable h l'

let modifies_buffer (b: M.buffer64) (h1 h2: vale_heap) =
  modifies_mem (loc_buffer b) h1 h2

let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
  modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2

let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
  modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
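(* Illustrative sketch (hypothetical, not from the original interface): these constructors are
   how generated Vale code assembles structured code values, e.g. an empty counted-loop shell
     va_While (va_cmp_lt (va_op_cmp_reg64 rRax) (va_const_cmp 10)) (va_Block (va_CNil ()))
   pairs an ocmp guard with a va_code body. *)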
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
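(* Illustrative sketch (hypothetical, not from the original interface): the recursion simply
   folds /\ over the list, e.g.
     buffers_readable h [b1; b2] == (buffer_readable h b1 /\ (buffer_readable h b2 /\ True))
   so one precondition can cover every buffer a procedure reads. *)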
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b1: Vale.X64.Memory.buffer128 ->
b2: Vale.X64.Memory.buffer128 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.vale_heap",
"Vale.X64.Decls.modifies_mem",
"Vale.X64.Memory.loc_union",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let modifies_buffer128_2 (b1 b2: M.buffer128) (h1 h2: vale_heap) =
| modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2 | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.validDstAddrsOffset128 | val validDstAddrsOffset128 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
offset: Prims.int ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 65,
"end_line": 429,
"start_col": 0,
"start_line": 428
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
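(* Illustrative sketch (hypothetical, not from the original interface): both branches are
   thunks, so a caller writes e.g.
     va_if (n > 0) (fun _ -> n - 1) (fun _ -> 0)
   and each branch normalizes under its own refinement (n > 0, resp. ~(n > 0)) without the
   guard having to reduce first. *)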
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
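(* Illustrative sketch (hypothetical, not from the original interface): this is the hook behind
   Vale's Mem64 operand syntax, e.g.
     va_opr_code_Mem64 h (OReg rRbx) 8 Secret
   reduces to OMem (MReg (Reg 0 rRbx) 8, Secret), i.e. the memory operand [rbx + 8] with a
   Secret taint. *)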
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
offset: Prims.int ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer128",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validDstAddrs",
"Vale.X64.Memory.vuint128",
"Prims.op_Subtraction",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"Prims.logical"
] | [] | false | false | false | true | true | let validDstAddrsOffset128
(h: vale_heap)
(addr: int)
(b: M.buffer128)
(offset len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.validSrcAddrs | val validSrcAddrs : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer t ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 42,
"end_line": 406,
"start_col": 0,
"start_line": 401
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
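(* Illustrative sketch (hypothetical, not from the original interface): the existential is
   discharged by exhibiting a concrete buffer and index, e.g. establishing
     valid_buf_maddr64 addr s_mem layout b 3 t
   for some b yields valid_mem_operand64 addr t s_mem layout, with addr equal to
   M.buffer_addr b s_mem + 8 * 3. *)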
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer t ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Prims.l_and",
"Vale.X64.Decls.buffer_readable",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Vale.X64.Decls.buffer_length",
"Prims.eq2",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.valid_layout_buffer_id",
"Vale.X64.Memory.get_heaplet_id",
"Vale.X64.Memory.valid_taint_buf",
"Vale.Arch.HeapImpl.__proj__Mkvale_heap_layout__item__vl_taint",
"Prims.logical"
] | [] | false | false | false | false | true | let validSrcAddrs
(#t: base_typ)
(h: vale_heap)
(addr: int)
(b: M.buffer t)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| buffer_readable h b /\ len <= buffer_length b /\ M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn | false |
|
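As a hedged illustration of the validSrcAddrs predicate completed in the record above (not part of the dataset record itself): inside the Vale.X64.Decls module context shown, a Vale-style precondition typically instantiates it with a concrete register, buffer, length and taint. The names example_src_pre and in_b, the register rRdi, the length 4 and the Secret taint below are assumptions chosen only for this sketch.
// Sketch only: a readable source buffer of at least 4 words, addressed by rdi, carrying Secret taint.
let example_src_pre (s:va_state) (in_b:M.buffer64) : prop0 =
  validSrcAddrs (va_get_mem s) (va_get_reg64 rRdi s) in_b 4 (va_get_mem_layout s) Secret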
Vale.X64.Decls.fsti | Vale.X64.Decls.validDstAddrs64 | val validDstAddrs64 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer64 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 38,
"end_line": 417,
"start_col": 0,
"start_line": 416
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer64 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer64",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validDstAddrs",
"Vale.X64.Memory.vuint64",
"Prims.logical"
] | [] | false | false | false | true | true | let validDstAddrs64
(h: vale_heap)
(addr: int)
(b: M.buffer64)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validDstAddrs h addr b len layout tn | false |
|
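A hedged sketch of how the validDstAddrs64 wrapper completed in the record above is commonly paired with a framing clause, assuming the Vale.X64.Decls module context shown; example_dst_pre, example_dst_post, out_b, the register rRsi and the length 2 are hypothetical names for illustration only.
// Sketch only: destination precondition plus the usual modifies framing between an initial and a final state.
let example_dst_pre (s:va_state) (out_b:M.buffer64) : prop0 =
  validDstAddrs64 (va_get_mem s) (va_get_reg64 rRsi s) out_b 2 (va_get_mem_layout s) Secret
let example_dst_post (s0 sM:va_state) (out_b:M.buffer64) : prop0 =
  modifies_buffer out_b (va_get_mem s0) (va_get_mem sM)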
Vale.X64.Decls.fsti | Vale.X64.Decls.validDstAddrs | val validDstAddrs : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer t ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 20,
"end_line": 411,
"start_col": 0,
"start_line": 408
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer t ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Prims.l_and",
"Vale.X64.Decls.validSrcAddrs",
"Vale.X64.Memory.valid_layout_buffer_id",
"Vale.X64.Memory.get_heaplet_id",
"Vale.X64.Decls.buffer_writeable",
"Prims.logical"
] | [] | false | false | false | false | true | let validDstAddrs
(#t: base_typ)
(h: vale_heap)
(addr: int)
(b: M.buffer t)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\ buffer_writeable b | false |
|
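A hedged sketch of the generic validDstAddrs completed in the record above at a 128-bit base type, again assuming the Vale.X64.Decls module context; xmm_b, the register rRdx and the length 1 are illustrative assumptions, and the implicit base_typ argument is inferred from M.buffer128.
// Sketch only: the same predicate applied to a quadword buffer (base type TUInt128 via M.buffer128).
let example_dst128_pre (s:va_state) (xmm_b:M.buffer128) : prop0 =
  validDstAddrs (va_get_mem s) (va_get_reg64 rRdx s) xmm_b 1 (va_get_mem_layout s) Secret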
Vale.X64.Decls.fsti | Vale.X64.Decls.validSrcAddrsOffset128 | val validSrcAddrsOffset128 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
offset: Prims.int ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 65,
"end_line": 426,
"start_col": 0,
"start_line": 425
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
offset: Prims.int ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer128",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validSrcAddrs",
"Vale.X64.Memory.vuint128",
"Prims.op_Subtraction",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"Prims.logical"
] | [] | false | false | false | true | true | let validSrcAddrsOffset128
(h: vale_heap)
(addr: int)
(b: M.buffer128)
(offset len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn | false |
|
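Editor's note (not part of the record above): validSrcAddrsOffset128 re-bases the address, treating addr as pointing 16 * offset bytes past the start of b, so the base address must be valid for len + offset 128-bit slots. A small unfolding sketch, assuming the surrounding definitions; the lemma name is ours and the statement is only the definitional unfolding, not machine-checked here:

let validSrcAddrsOffset128_unfold (h:vale_heap) (addr:int) (b:M.buffer128)
  (offset len:int) (layout:vale_heap_layout) (tn:taint)
  : Lemma (validSrcAddrsOffset128 h addr b offset len layout tn <==>
           validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn)
  = () // holds by unfolding validSrcAddrsOffset128

For example, with offset = 2 the predicate asks for validSrcAddrs at addr - 32 with len + 2 slots.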
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer128 | val modifies_buffer128 : b: Vale.X64.Memory.buffer128 -> h1: Vale.X64.Decls.vale_heap -> h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 99,
"end_line": 395,
"start_col": 7,
"start_line": 395
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) = | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | b: Vale.X64.Memory.buffer128 -> h1: Vale.X64.Decls.vale_heap -> h2: Vale.X64.Decls.vale_heap
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.vale_heap",
"Vale.X64.Decls.modifies_mem",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let modifies_buffer128 (b: M.buffer128) (h1 h2: vale_heap) =
| modifies_mem (loc_buffer b) h1 h2 | false |
|
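Editor's note (not part of the record above): modifies_buffer128 is the single-buffer instance of modifies_mem, mirroring modifies_buffer for 64-bit buffers earlier in the file context; the _2 and _3 variants take the loc_union of the buffers' locations instead. A trivial unfolding sketch plus the typical postcondition shape; the names below are illustrative only and not machine-checked here:

let modifies_buffer128_unfold (b:M.buffer128) (h1 h2:vale_heap)
  : Lemma (modifies_buffer128 b h1 h2 <==> modifies_mem (loc_buffer b) h1 h2)
  = () // modifies_buffer128 is an unfold abbreviation of the right-hand side

// Typical use in a Vale procedure postcondition (shape only, hypothetical names):
// ensures modifies_buffer128 dst_b (va_get_mem va_s0) (va_get_mem va_sM)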
Vale.X64.Decls.fsti | Vale.X64.Decls.validSrcAddrs128 | val validSrcAddrs128 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 38,
"end_line": 420,
"start_col": 0,
"start_line": 419
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
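(* Note on the destination predicates above: va_is_dst_opr64 and va_is_dst_reg_opr64
   accept only register operands other than rRsp, presumably because the stack
   pointer is handled by the dedicated stack operations rather than as a
   general-purpose destination; memory and stack destinations are rejected here. *)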
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
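(* Illustrative consequence of the framing functions above (a restatement, not a new
   lemma): update_operand (OReg rRbx) sM sK copies only the value of rRbx from sM
   into sK, update_operand (OMem (m, t)) sM sK copies the whole vf_heap, and a
   constant operand leaves sK unchanged. rRbx is assumed to be the reg_64 constant
   from Vale.X64.Machine_s. *)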
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
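(* Unfolding sketch for buffers_readable (follows from the recursion above):
     buffers_readable h [b1; b2] unfolds to
       buffer_readable h b1 /\ (buffer_readable h b2 /\ True)
   so a finite list of buffers is readable exactly when each element is. *)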
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
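(* Sketch of the intended use of the two predicates above: validSrcAddrs pins a
   buffer b of at least len elements at address addr, readable and correctly
   tainted in the given layout, while validDstAddrs strengthens it with write
   permission (valid_layout_buffer_id ... true together with buffer_writeable). *)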
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer128",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validSrcAddrs",
"Vale.X64.Memory.vuint128",
"Prims.logical"
] | [] | false | false | false | true | true | let validSrcAddrs128
(h: vale_heap)
(addr: int)
(b: M.buffer128)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validSrcAddrs h addr b len layout tn | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.validDstAddrs128 | val validDstAddrs128 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 38,
"end_line": 423,
"start_col": 0,
"start_line": 422
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
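(* Usage sketch for the hidden conditionals above (illustrative only): a caller
   writes va_if b (fun _ -> e1) (fun _ -> e2), where the thunks let e1 and e2 be
   typed under the assumptions b and ~b respectively; total_if b x y is the plain,
   eagerly evaluated variant. *)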
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
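(* Concrete address arithmetic implied by the two predicates above: if
   M.buffer_addr b s_mem is 0x1000, then index 3 of a buffer64 corresponds to
   addr 0x1000 + 8 * 3 = 0x1018, and index 3 of a buffer128 to
   0x1000 + 16 * 3 = 0x1030 (these numbers are purely illustrative). *)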
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer128 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer128",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validDstAddrs",
"Vale.X64.Memory.vuint128",
"Prims.logical"
] | [] | false | false | false | true | true | let validDstAddrs128
(h: vale_heap)
(addr: int)
(b: M.buffer128)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validDstAddrs h addr b len layout tn | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer_specific | val modifies_buffer_specific (b: M.buffer64) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | val modifies_buffer_specific (b: M.buffer64) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2) | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 45,
"end_line": 455,
"start_col": 0,
"start_line": 448
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Vale.X64.Memory.buffer64 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap ->
start: Prims.nat ->
last: Prims.nat
-> Prims.GTot Vale.Def.Prop_s.prop0 | Prims.GTot | [
"sometrivial"
] | [] | [
"Vale.X64.Memory.buffer64",
"Vale.X64.Decls.vale_heap",
"Prims.nat",
"Prims.l_and",
"Vale.X64.Decls.modifies_buffer",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Vale.X64.Decls.buffer_length",
"Vale.X64.Memory.vuint64",
"Prims.op_BarBar",
"Prims.op_GreaterThan",
"Prims.eq2",
"Vale.Def.Types_s.nat64",
"Vale.X64.Decls.buffer64_read",
"FStar.Seq.Base.index",
"Vale.X64.Memory.base_typ_as_vale_type",
"Vale.X64.Memory.buffer_as_seq",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | false | false | let modifies_buffer_specific (b: M.buffer64) (h1 h2: vale_heap) (start last: nat) : GTot prop0 =
| modifies_buffer b h1 h2 /\
(forall (i: nat). {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b /\ (i < start || i > last) ==>
buffer64_read b i h1 == buffer64_read b i h2) | false |
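The record above completes the definition of modifies_buffer_specific. As a minimal usage sketch (not part of any dataset record; it assumes the definitions of this module are in scope, and the name example_modifies_window and the bounds 2 and 5 are invented for illustration), the predicate is read as: between heaps h1 and h2, only the 64-bit slots of b whose index falls in the window [start, last] may change, and every other readable slot is preserved.

(* Hypothetical sketch, not from Vale.X64.Decls.fsti. *)
let example_modifies_window (b:M.buffer64) (h1 h2:vale_heap) : GTot prop0 =
  // spec clause: only slots 2..5 of b may differ between h1 and h2
  modifies_buffer_specific b h1 h2 2 5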
Vale.X64.Decls.fsti | Vale.X64.Decls.modifies_buffer_specific128 | val modifies_buffer_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | val modifies_buffer_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2) | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 46,
"end_line": 438,
"start_col": 0,
"start_line": 431
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Vale.X64.Memory.buffer128 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap ->
start: Prims.nat ->
last: Prims.nat
-> Prims.GTot Vale.Def.Prop_s.prop0 | Prims.GTot | [
"sometrivial"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.vale_heap",
"Prims.nat",
"Prims.l_and",
"Vale.X64.Decls.modifies_buffer128",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Vale.X64.Decls.buffer_length",
"Vale.X64.Memory.vuint128",
"Prims.op_BarBar",
"Prims.op_GreaterThan",
"Prims.eq2",
"Vale.X64.Decls.quad32",
"Vale.X64.Decls.buffer128_read",
"FStar.Seq.Base.index",
"Vale.X64.Memory.base_typ_as_vale_type",
"Vale.X64.Memory.buffer_as_seq",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | false | false | let modifies_buffer_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 =
| modifies_buffer128 b h1 h2 /\
(forall (i: nat). {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b /\ (i < start || i > last) ==>
buffer128_read b i h1 == buffer128_read b i h2) | false |
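A second sketch along the same lines for the 128-bit record above (again hypothetical: example_written_prefix, the heap names h0/h1, and the bound len are invented). validDstAddrs128 and modifies_buffer_specific128 are commonly paired in a postcondition stating that b is a valid, writeable 128-bit destination of length at least len and that only the quadwords of b with index at most len may have changed.

(* Hypothetical sketch, not from Vale.X64.Decls.fsti. *)
let example_written_prefix (h0 h1:vale_heap) (addr:int) (b:M.buffer128)
    (len:nat) (layout:vale_heap_layout) (t:taint) : GTot prop0 =
  // b is a valid destination in h1, and only indices 0..len differ from h0
  validDstAddrs128 h1 addr b len layout t /\
  modifies_buffer_specific128 b h0 h1 0 len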
Vale.X64.Decls.fsti | Vale.X64.Decls.validSrcAddrs64 | val validSrcAddrs64 : h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer64 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 38,
"end_line": 414,
"start_col": 0,
"start_line": 413
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
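(* Example unfolding of the recursion above: on a two-element list,
   buffers_readable h [b1; b2] normalizes to
     buffer_readable h b1 /\ (buffer_readable h b2 /\ True) *)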
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h: Vale.X64.Decls.vale_heap ->
addr: Prims.int ->
b: Vale.X64.Memory.buffer64 ->
len: Prims.int ->
layout: Vale.Arch.HeapImpl.vale_heap_layout ->
tn: Vale.Arch.HeapTypes_s.taint
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.int",
"Vale.X64.Memory.buffer64",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapTypes_s.taint",
"Vale.X64.Decls.validSrcAddrs",
"Vale.X64.Memory.vuint64",
"Prims.logical"
] | [] | false | false | false | true | true | let validSrcAddrs64
(h: vale_heap)
(addr: int)
(b: M.buffer64)
(len: int)
(layout: vale_heap_layout)
(tn: taint)
=
| validSrcAddrs h addr b len layout tn | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_upd_operand_dst_opr64 | val va_upd_operand_dst_opr64 : o: Vale.X64.Machine_s.operand64 -> v: Vale.Def.Types_s.nat64 -> s: Vale.X64.State.vale_state
-> Vale.X64.State.vale_state | let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 313,
"start_col": 0,
"start_line": 308
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> v: Vale.Def.Types_s.nat64 -> s: Vale.X64.State.vale_state
-> Vale.X64.State.vale_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.Def.Types_s.nat64",
"Vale.X64.State.vale_state",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Vale.X64.State.update_reg_64",
"Vale.X64.Machine_s.maddr",
"Vale.Arch.HeapTypes_s.taint"
] | [] | false | false | false | true | false | let va_upd_operand_dst_opr64 (o: operand64) (v: nat64) (s: vale_state) =
| match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.buffer_modifies_specific128 | val buffer_modifies_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | val buffer_modifies_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 | let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2) | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 46,
"end_line": 446,
"start_col": 0,
"start_line": 440
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Vale.X64.Memory.buffer128 ->
h1: Vale.X64.Decls.vale_heap ->
h2: Vale.X64.Decls.vale_heap ->
start: Prims.nat ->
last: Prims.nat
-> Prims.GTot Vale.Def.Prop_s.prop0 | Prims.GTot | [
"sometrivial"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.vale_heap",
"Prims.nat",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Vale.X64.Decls.buffer_length",
"Vale.X64.Memory.vuint128",
"Prims.op_BarBar",
"Prims.op_GreaterThan",
"Prims.eq2",
"Vale.X64.Decls.quad32",
"Vale.X64.Decls.buffer128_read",
"FStar.Seq.Base.index",
"Vale.X64.Memory.base_typ_as_vale_type",
"Vale.X64.Memory.buffer_as_seq",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | false | false | let buffer_modifies_specific128 (b: M.buffer128) (h1 h2: vale_heap) (start last: nat) : GTot prop0 =
| (forall (i: nat). {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b /\ (i < start || i > last) ==>
buffer128_read b i h1 == buffer128_read b i h2) | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.buffers_disjoint128 | val buffers_disjoint128 : b1: Vale.X64.Memory.buffer128 -> b2: Vale.X64.Memory.buffer128 -> Vale.Def.Prop_s.prop0 | let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2] | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 48,
"end_line": 461,
"start_col": 7,
"start_line": 460
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
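// Hedged example (added): for a plain register operand the memory/stack side
// conditions above are vacuous, so validity collapses to the basic state-level check.
(*
  valid_operand (OReg rRax) s <==> Vale.X64.State.valid_src_operand (OReg rRax) s
*)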
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
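// Hedged illustration (added): these constructors turn a base operand plus a constant
// offset into a tainted memory or stack operand; rRbx and rRsp below are arbitrary
// example registers, and Secret/Public are the taint labels from Vale.Arch.HeapTypes_s.
(*
  va_opr_code_Mem64 h (OReg rRbx) 16 Secret == OMem (MReg (Reg 0 rRbx) 16, Secret)
  va_opr_code_Stack (OReg rRsp) 8 Public   == OStack (MReg (Reg 0 rRsp) 8, Public)
*)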
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
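// Hedged illustration (added): each va_upd_* writes one state component and the
// matching va_get_* reads it back, so for an arbitrary register and value:
(*
  va_get_reg64 rRax (va_upd_reg64 rRax v s) == v
  va_get_xmm 2 (va_upd_xmm 2 q s) == q
*)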
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
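// Hedged framing example (added): update_operand copies only the component named by
// the operand from sM into sK; for a register destination this unfolds to:
(*
  update_operand (OReg rRbx) sM sK == va_upd_reg64 rRbx (va_get_reg64 rRbx sM) sK
*)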
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes:int) =
  (forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
  (forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
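// Hedged example (added): buffers_readable is just the conjunction of buffer_readable
// over the list, e.g. for a two-element list:
(*
  buffers_readable h [b1; b2] <==> buffer_readable h b1 /\ buffer_readable h b2
*)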
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
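// Hedged reading (added): the Offset128 variants treat addr as pointing `offset`
// 16-byte slots into b, so the underlying buffer starts at addr - 16 * offset and
// must be valid for len + offset elements; for instance with offset = 2:
(*
  validSrcAddrsOffset128 h addr b 2 len layout tn ==
    validSrcAddrs h (addr - 32) b (len + 2) layout tn
*)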
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2] | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | b1: Vale.X64.Memory.buffer128 -> b2: Vale.X64.Memory.buffer128 -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.locs_disjoint",
"Prims.Cons",
"Vale.X64.Memory.loc",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Prims.Nil",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let buffers_disjoint128 (b1 b2: M.buffer128) =
| locs_disjoint [loc_buffer b1; loc_buffer b2] | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_upd_operand_reg_opr64 | val va_upd_operand_reg_opr64 : o: Vale.X64.Machine_s.operand64 -> v: Vale.Def.Types_s.nat64 -> s: Vale.X64.State.vale_state
-> Vale.X64.State.vale_state | let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 321,
"start_col": 0,
"start_line": 316
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o: Vale.X64.Machine_s.operand64 -> v: Vale.Def.Types_s.nat64 -> s: Vale.X64.State.vale_state
-> Vale.X64.State.vale_state | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"Vale.Def.Types_s.nat64",
"Vale.X64.State.vale_state",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Vale.X64.State.update_reg_64",
"Vale.X64.Machine_s.maddr",
"Vale.Arch.HeapTypes_s.taint"
] | [] | false | false | false | true | false | let va_upd_operand_reg_opr64 (o: operand64) (v: nat64) (s: vale_state) =
| match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.buffers_disjoint | val buffers_disjoint : b1: Vale.X64.Memory.buffer64 -> b2: Vale.X64.Memory.buffer64 -> Vale.Def.Prop_s.prop0 | let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2] | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 48,
"end_line": 458,
"start_col": 7,
"start_line": 457
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | b1: Vale.X64.Memory.buffer64 -> b2: Vale.X64.Memory.buffer64 -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer64",
"Vale.X64.Decls.locs_disjoint",
"Prims.Cons",
"Vale.X64.Memory.loc",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint64",
"Prims.Nil",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let buffers_disjoint (b1 b2: M.buffer64) =
| locs_disjoint [loc_buffer b1; loc_buffer b2] | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_state_eq | val va_state_eq (s0 s1: va_state) : prop0 | val va_state_eq (s0 s1: va_state) : prop0 | let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 68,
"end_line": 479,
"start_col": 0,
"start_line": 479
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | s0: Vale.X64.Decls.va_state -> s1: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.State.state_eq",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_state_eq (s0 s1: va_state) : prop0 =
| state_eq s0 s1 | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.buffer_disjoints128 | val buffer_disjoints128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 | val buffer_disjoints128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 | let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls) | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 92,
"end_line": 470,
"start_col": 0,
"start_line": 469
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
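(* Example: a straight-line block of two instructions c1 and c2 (any va_code values) is built as
   va_Block (va_CCons c1 (va_CCons c2 (va_CNil ()))), which is just Block [c1; c2]. *)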
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
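(* Example: for c = va_IfElse b ct cf, va_get_ifCond c is b, va_get_ifTrue c is ct, and
   va_get_ifFalse c is cf; the accessors simply project the constructors introduced above. *)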
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
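(* Example: buffers_readable h [b1; b2] unfolds to
   buffer_readable h b1 /\ (buffer_readable h b2 /\ True). *)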
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
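(* For illustration (the Secret taint constructor is assumed from Vale.Arch.HeapTypes_s):
   validDstAddrs64 h addr b 4 layout Secret says b is readable and writeable, holds at least
   4 64-bit words, starts at address addr, and carries taint Secret in layout. *)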
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
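(* For illustration, with offset = 2 and len = 3: validSrcAddrsOffset128 h addr b 2 3 layout tn is
   validSrcAddrs h (addr - 32) b 5 layout tn, i.e. addr points 2 quadwords (32 bytes) into b and
   3 further quadwords are in range. *)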
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
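(* For illustration, with start = 1 and last = 2: every in-bounds index i with i < 1 or i > 2
   satisfies buffer128_read b i h1 == buffer128_read b i h2; only entries 1 and 2 may differ,
   and all writes stay within loc_buffer b. *)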
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | l: Vale.X64.Memory.buffer128 -> ls: Prims.list Vale.X64.Memory.buffer128 -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Prims.list",
"FStar.Pervasives.norm",
"Prims.Cons",
"FStar.Pervasives.norm_step",
"FStar.Pervasives.zeta",
"FStar.Pervasives.iota",
"FStar.Pervasives.delta_only",
"Prims.string",
"Prims.Nil",
"Vale.Def.Prop_s.prop0",
"Vale.X64.Decls.loc_locs_disjoint_rec128"
] | [] | false | false | false | true | false | let buffer_disjoints128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 =
| norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls) | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.buffers3_disjoint128 | val buffers3_disjoint128 : b1: Vale.X64.Memory.buffer128 -> b2: Vale.X64.Memory.buffer128 -> b3: Vale.X64.Memory.buffer128
-> Vale.Def.Prop_s.prop0 | let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3] | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 63,
"end_line": 473,
"start_col": 7,
"start_line": 472
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | b1: Vale.X64.Memory.buffer128 -> b2: Vale.X64.Memory.buffer128 -> b3: Vale.X64.Memory.buffer128
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.locs_disjoint",
"Prims.Cons",
"Vale.X64.Memory.loc",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Prims.Nil",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let buffers3_disjoint128 (b1 b2 b3: M.buffer128) =
| locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3] | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.state_inv | val state_inv (s: va_state) : prop0 | val state_inv (s: va_state) : prop0 | let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 56,
"end_line": 481,
"start_col": 0,
"start_line": 481
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
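// For example, buffers_readable h [b1; b2] unfolds to
// buffer_readable h b1 /\ (buffer_readable h b2 /\ True).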
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
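// Note: validDstAddrs is validSrcAddrs strengthened with write permission in the layout
// and buffer_writeable, so every valid destination range is also a valid source range.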
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
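// Example: validSrcAddrsOffset128 h addr b 1 4 layout tn says the buffer starts 16 bytes
// before addr (addr - 16 * 1) and at least 1 + 4 = 5 quad32 entries of b are readable.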
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
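// Example: modifies_buffer_specific128 b h1 h2 2 3 means only entries 2 and 3 of b may
// differ between h1 and h2 (and nothing outside b is modified); every index outside
// [2, 3] reads the same quad32 in both heaps.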
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
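// Example: buffer_disjoints128 l [b1; b2] normalizes to
// locs_disjoint [loc_buffer l; loc_buffer b1] /\ (locs_disjoint [loc_buffer l; loc_buffer b2] /\ True).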
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | s: Vale.X64.Decls.va_state -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Memory.mem_inv",
"Vale.X64.State.__proj__Mkvale_state__item__vs_heap",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let state_inv (s: va_state) : prop0 =
| M.mem_inv s.vs_heap | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.vale_state_with_inv | val vale_state_with_inv : Type | let vale_state_with_inv = s:va_state{state_inv s} | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 49,
"end_line": 483,
"start_col": 0,
"start_line": 483
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
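// Example: total_if true 0 1 is 0, and va_if true (fun _ -> 0) (fun _ -> 1) is 0; the
// thunked variants let each branch assume its guard (b or ~b) while it is elaborated.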
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
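// Worked example: if M.buffer_addr b s_mem is 0x1000, then a 64-bit operand at address
// 0x1000 + 8 * 3 = 0x1018 is valid whenever index 3 of b is readable and the taints match;
// 128-bit operands follow the same scheme with a 16-byte stride.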
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
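// Example: va_opr_code_Mem64 h (OReg r) 8 t unfolds to OMem (MReg (Reg 0 r) 8, t), i.e. an
// access at address [r + 8]; va_opr_code_Mem128 behaves the same for 128-bit operands, and
// the OMem (MConst 42, t) fallback arms appear to be there just to keep the matches total.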
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
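// Example: va_is_dst_opr64 (OReg rRsp) s is false (rRsp is never a writable destination),
// and constant or memory operands are rejected as 64-bit destinations as well, whereas
// va_is_src_opr64 o s only requires o to be a valid source operand in s.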
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
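// Example: update_operand (OReg r) sM sK copies register r's value from sM into sK and
// leaves the rest of sK unchanged; for OMem / OStack destinations the whole heap / stack
// of sM is carried into sK instead.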
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.state_inv"
] | [] | false | false | false | true | true | let vale_state_with_inv =
| s: va_state{state_inv s} | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.va_require_total | val va_require_total (c0 c1: va_code) (s0: va_state) : prop0 | val va_require_total (c0 c1: va_code) (s0: va_state) : prop0 | let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 26,
"end_line": 486,
"start_col": 0,
"start_line": 485
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
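(* Address-arithmetic sketch for the Offset128 variants above: the base of b is
   taken to be addr - 16 * offset, so addr itself points at element [offset] of b
   and element (offset + i) lives at addr + 16 * i. Requiring len + offset valid
   elements therefore covers exactly the len elements starting at addr. *)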
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
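(* Normalization sketch: when this unfold definition is used with a literal list,
   e.g. buffer_disjoints128 b [b1; b2], the norm [zeta; iota; delta_only ...] step
   reduces it during typechecking to
     locs_disjoint [loc_buffer b; loc_buffer b1] /\
     (locs_disjoint [loc_buffer b; loc_buffer b2] /\ True)
   so callers see a flat conjunction rather than a recursive predicate. *)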
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s} | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | c0: Vale.X64.Decls.va_code -> c1: Vale.X64.Decls.va_code -> s0: Vale.X64.Decls.va_state
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Prims.l_and",
"Prims.eq2",
"Vale.X64.Decls.state_inv",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_require_total (c0 c1: va_code) (s0: va_state) : prop0 =
| c0 == c1 /\ state_inv s0 | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_evalCond | val va_evalCond (b: ocmp) (s: va_state) : GTot bool | val va_evalCond (b: ocmp) (s: va_state) : GTot bool | let va_evalCond (b:ocmp) (s:va_state) : GTot bool = eval_ocmp s b | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 72,
"end_line": 492,
"start_col": 7,
"start_line": 492
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
  (forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s}
let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0
let va_ensure_total (c0:va_code) (s0:va_state) (s1:va_state) (f1:va_fuel) : prop0 =
eval_code c0 s0 f1 s1 /\ state_inv s1 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | b: Vale.X64.Decls.ocmp -> s: Vale.X64.Decls.va_state -> Prims.GTot Prims.bool | Prims.GTot | [
"sometrivial"
] | [] | [
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.eval_ocmp",
"Prims.bool"
] | [] | false | false | false | false | false | let va_evalCond (b: ocmp) (s: va_state) : GTot bool =
| eval_ocmp s b | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_ensure_total | val va_ensure_total (c0: va_code) (s0 s1: va_state) (f1: va_fuel) : prop0 | val va_ensure_total (c0: va_code) (s0 s1: va_state) (f1: va_fuel) : prop0 | let va_ensure_total (c0:va_code) (s0:va_state) (s1:va_state) (f1:va_fuel) : prop0 =
eval_code c0 s0 f1 s1 /\ state_inv s1 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 39,
"end_line": 489,
"start_col": 0,
"start_line": 488
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
  (forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[a])
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s}
let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0 | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
c0: Vale.X64.Decls.va_code ->
s0: Vale.X64.Decls.va_state ->
s1: Vale.X64.Decls.va_state ->
f1: Vale.X64.Decls.va_fuel
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_fuel",
"Prims.l_and",
"Vale.X64.Decls.eval_code",
"Vale.X64.Decls.state_inv",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_ensure_total (c0: va_code) (s0 s1: va_state) (f1: va_fuel) : prop0 =
| eval_code c0 s0 f1 s1 /\ state_inv s1 | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.memTaint_type | val memTaint_type : Type0 | let memTaint_type = Map.t int taint | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 42,
"end_line": 652,
"start_col": 7,
"start_line": 652
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s}
let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0
let va_ensure_total (c0:va_code) (s0:va_state) (s1:va_state) (f1:va_fuel) : prop0 =
eval_code c0 s0 f1 s1 /\ state_inv s1
val eval_ocmp : s:va_state -> c:ocmp -> GTot bool
unfold let va_evalCond (b:ocmp) (s:va_state) : GTot bool = eval_ocmp s b
val valid_ocmp : c:ocmp -> s:va_state -> GTot bool
val havoc_flags : Flags.t
val lemma_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_eq o1 o2)) <==> (va_eval_opr64 s o1 == va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_eq o1 o2))]
val lemma_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ne o1 o2)) <==> (va_eval_opr64 s o1 <> va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ne o1 o2))]
val lemma_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_le o1 o2)) <==> (va_eval_opr64 s o1 <= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_le o1 o2))]
val lemma_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ge o1 o2)) <==> (va_eval_opr64 s o1 >= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ge o1 o2))]
val lemma_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_lt o1 o2)) <==> (va_eval_opr64 s o1 < va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_lt o1 o2))]
val lemma_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_gt o1 o2)) <==> (va_eval_opr64 s o1 > va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_gt o1 o2))]
val lemma_valid_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_eq o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_eq o1 o2) s)]
val lemma_valid_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ne o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ne o1 o2) s)]
val lemma_valid_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_le o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_le o1 o2) s)]
val lemma_valid_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ge o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ge o1 o2) s)]
val lemma_valid_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_lt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_lt o1 o2) s)]
val lemma_valid_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_gt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_gt o1 o2) s)]
val va_compute_merge_total (f0:va_fuel) (fM:va_fuel) : va_fuel
val va_lemma_merge_total (b0:va_codes) (s0:va_state) (f0:va_fuel) (sM:va_state) (fM:va_fuel) (sN:va_state) : Ghost va_fuel
(requires
Cons? b0 /\
eval_code (Cons?.hd b0) s0 f0 sM /\
eval_code (va_Block (Cons?.tl b0)) sM fM sN
)
(ensures (fun fN ->
fN == va_compute_merge_total f0 fM /\
eval_code (va_Block b0) s0 fN sN
))
val va_lemma_empty_total (s0:va_state) (bN:va_codes) : Ghost (va_state & va_fuel)
(requires True)
(ensures (fun (sM, fM) ->
s0 == sM /\
eval_code (va_Block []) s0 fM sM
))
val va_lemma_ifElse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) : Ghost (bool & va_state & va_state & va_fuel)
(requires True)
(ensures (fun (cond, sM, sN, f0) ->
cond == eval_ocmp s0 ifb /\
sM == {s0 with vs_flags = havoc_flags}
))
val va_lemma_ifElseTrue_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
eval_ocmp s0 ifb /\
eval_code ct ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
)
val va_lemma_ifElseFalse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
not (eval_ocmp s0 ifb) /\
eval_code cf ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
)
let va_whileInv_total (b:ocmp) (c:va_code) (s0:va_state) (sN:va_state) (f0:va_fuel) : prop0 =
eval_while_inv (While b c) s0 f0 sN /\ state_inv s0
val va_lemma_while_total (b:ocmp) (c:va_code) (s0:va_state) : Ghost (va_state & va_fuel)
(requires True)
(ensures fun (s1, f1) ->
s1 == s0 /\
eval_while_inv (While b c) s1 f1 s1
)
val va_lemma_whileTrue_total (b:ocmp) (c:va_code) (s0:va_state) (sW:va_state) (fW:va_fuel) : Ghost (va_state & va_fuel)
(requires eval_ocmp sW b /\ valid_ocmp b sW)
(ensures fun (s1, f1) -> s1 == {sW with vs_flags = havoc_flags} /\ f1 == fW)
val va_lemma_whileFalse_total (b:ocmp) (c:va_code) (s0:va_state) (sW:va_state) (fW:va_fuel) : Ghost (va_state & va_fuel)
(requires
valid_ocmp b sW /\
not (eval_ocmp sW b) /\
eval_while_inv (While b c) s0 fW sW
)
(ensures fun (s1, f1) ->
s1 == {sW with vs_flags = havoc_flags} /\
eval_code (While b c) s0 f1 s1
)
val va_lemma_whileMerge_total (c:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) (fM:va_fuel) (sN:va_state) : Ghost va_fuel
(requires While? c /\ (
let cond = While?.whileCond c in
sN.vs_ok /\
valid_ocmp cond sM /\
eval_ocmp sM cond /\
eval_while_inv c s0 f0 sM /\
eval_code (While?.whileBody c) ({sM with vs_flags = havoc_flags}) fM sN
))
(ensures (fun fN ->
eval_while_inv c s0 fN sN
))
val printer : Type0
val print_string : string -> FStar.All.ML unit
val print_header : printer -> FStar.All.ML unit
val print_proc : (name:string) -> (code:va_code) -> (label:int) -> (p:printer) -> FStar.All.ML int
val print_footer : printer -> FStar.All.ML unit
val masm : printer
val gcc : printer
val gcc_linux : printer | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type0 | Prims.Tot | [
"total"
] | [] | [
"FStar.Map.t",
"Prims.int",
"Vale.Arch.HeapTypes_s.taint"
] | [] | false | false | false | true | true | let memTaint_type =
| Map.t int taint | false |
|
Vale.X64.Decls.fsti | Vale.X64.Decls.buffers_readable | val buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) | val buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) | let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l' | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 62,
"end_line": 388,
"start_col": 0,
"start_line": 385
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
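(* Editorial sketch (illustration only, not from the original source): these
   constructors build the precode AST that Vale procedures are verified against.
   For hypothetical instruction codes i1, i2 and condition c,
     va_Block (va_CCons i1 (va_CCons (va_While c (va_Block (va_CCons i2 (va_CNil ())))) (va_CNil ())))
   is simply the value
     Block [i1; While c (Block [i2])] *)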
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | h: Vale.X64.Decls.vale_heap -> l: Prims.list Vale.X64.Memory.buffer64
-> Prims.GTot Vale.Def.Prop_s.prop0 | Prims.GTot | [
"sometrivial",
""
] | [] | [
"Vale.X64.Decls.vale_heap",
"Prims.list",
"Vale.X64.Memory.buffer64",
"Prims.l_True",
"Prims.l_and",
"Vale.X64.Decls.buffer_readable",
"Vale.X64.Memory.vuint64",
"Vale.X64.Decls.buffers_readable",
"Vale.Def.Prop_s.prop0"
] | [
"recursion"
] | false | false | false | false | false | let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
| match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l' | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.va_whileInv_total | val va_whileInv_total (b: ocmp) (c: va_code) (s0 sN: va_state) (f0: va_fuel) : prop0 | val va_whileInv_total (b: ocmp) (c: va_code) (s0 sN: va_state) (f0: va_fuel) : prop0 | let va_whileInv_total (b:ocmp) (c:va_code) (s0:va_state) (sN:va_state) (f0:va_fuel) : prop0 =
eval_while_inv (While b c) s0 f0 sN /\ state_inv s0 | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 53,
"end_line": 606,
"start_col": 0,
"start_line": 605
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
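(* Editorial note: va_if and total_thunk_if take thunks rather than already-evaluated
   branches, so each branch is typed (and normalized) only under the assumption that
   the guard holds; the unit argument carries the refinement b, respectively ~b.
   A minimal illustrative use (hypothetical, not in the original file):
     let abs_sketch (x:int) : GTot int = va_if (x >= 0) (fun _ -> x) (fun _ -> -x) *)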
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
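(* Editorial note: valid_operand / valid_operand128 layer two checks: the basic
   machine-level validity from Vale.X64.State plus, for OMem and OStack operands,
   agreement with the taint tracking.  As an informal sketch with a hypothetical
   state s, requiring
     valid_operand (OMem (MReg (Reg 0 rRbx) 8, Secret)) s
   asks that address rbx+8 land inside some live 64-bit buffer of the current heap
   and that this location carry taint Secret in s.vs_heap.vf_layout.vl_taint. *)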
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
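(* Editorial sketch (assumption, not stated in this file): these va_opr_code_*
   helpers appear to be what Vale-generated code uses to assemble memory operands.
   For a hypothetical heaplet h0,
     va_opr_code_Mem64 h0 (OReg rRax) 16 Secret == OMem (MReg (Reg 0 rRax) 16, Secret)
   i.e. the address [rax + 16] tagged Secret; the MConst 42 fall-through looks like a
   dummy result for operand shapes that are not expected to occur. *)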
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
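(* Editorial note: the va_get_* / va_upd_* pairs above are plain field reads and
   functional record updates on vale_state, so they compose freely.  An illustrative
   (hypothetical) composition:
     let s1 = va_upd_reg64 rRdi 0 (va_upd_flags f s0)
   gives s0 with its flags replaced by f and rdi set to 0, leaving everything else
   untouched. *)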
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
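(* Editorial sketch: buffers_readable is the pointwise conjunction of buffer_readable
   over a list, so for hypothetical buffers b1, b2 it unfolds as
     buffers_readable h [b1; b2] == buffer_readable h b1 /\ (buffer_readable h b2 /\ True)
   with the trailing True coming from the [] case. *)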
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
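(* Editorial note: validSrcAddrs / validDstAddrs bundle the facts a procedure
   precondition typically needs about a pointer argument: the buffer is readable in
   heap h, long enough, starts exactly at addr, is laid out in this heaplet, and
   carries taint tn (validDstAddrs additionally demands writeability).  A hypothetical
   precondition for reading 4 quadwords from a buffer b addressed by rdi:
     validSrcAddrs128 (va_get_mem_heaplet 0 s) (va_get_reg64 rRdi s) b 4 (va_get_mem_layout s) Secret *)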
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
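(* Editorial note: modifies_buffer_specific strengthens modifies_buffer with an
   index-level frame condition: outside [start, last] the contents are unchanged.
   Instantiating the quantifier at a hypothetical in-bounds index i,
     modifies_buffer_specific b h1 h2 3 5 /\ (i < 3 || i > 5) ==>
       buffer64_read b i h1 == buffer64_read b i h2 *)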
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
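(* Editorial sketch: buffer_disjoints128 is marked unfold and wrapped in norm so the
   recursion over the list is eliminated during elaboration.  For hypothetical
   buffers b, b1, b2,
     buffer_disjoints128 b [b1; b2]
   reduces to
     locs_disjoint [loc_buffer b; loc_buffer b1] /\
     (locs_disjoint [loc_buffer b; loc_buffer b2] /\ True) *)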
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s}
let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0
let va_ensure_total (c0:va_code) (s0:va_state) (s1:va_state) (f1:va_fuel) : prop0 =
eval_code c0 s0 f1 s1 /\ state_inv s1
val eval_ocmp : s:va_state -> c:ocmp -> GTot bool
unfold let va_evalCond (b:ocmp) (s:va_state) : GTot bool = eval_ocmp s b
val valid_ocmp : c:ocmp -> s:va_state -> GTot bool
val havoc_flags : Flags.t
val lemma_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_eq o1 o2)) <==> (va_eval_opr64 s o1 == va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_eq o1 o2))]
val lemma_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ne o1 o2)) <==> (va_eval_opr64 s o1 <> va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ne o1 o2))]
val lemma_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_le o1 o2)) <==> (va_eval_opr64 s o1 <= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_le o1 o2))]
val lemma_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ge o1 o2)) <==> (va_eval_opr64 s o1 >= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ge o1 o2))]
val lemma_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_lt o1 o2)) <==> (va_eval_opr64 s o1 < va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_lt o1 o2))]
val lemma_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_gt o1 o2)) <==> (va_eval_opr64 s o1 > va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_gt o1 o2))]
val lemma_valid_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_eq o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_eq o1 o2) s)]
val lemma_valid_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ne o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ne o1 o2) s)]
val lemma_valid_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_le o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_le o1 o2) s)]
val lemma_valid_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ge o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ge o1 o2) s)]
val lemma_valid_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_lt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_lt o1 o2) s)]
val lemma_valid_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_gt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_gt o1 o2) s)]
val va_compute_merge_total (f0:va_fuel) (fM:va_fuel) : va_fuel
val va_lemma_merge_total (b0:va_codes) (s0:va_state) (f0:va_fuel) (sM:va_state) (fM:va_fuel) (sN:va_state) : Ghost va_fuel
(requires
Cons? b0 /\
eval_code (Cons?.hd b0) s0 f0 sM /\
eval_code (va_Block (Cons?.tl b0)) sM fM sN
)
(ensures (fun fN ->
fN == va_compute_merge_total f0 fM /\
eval_code (va_Block b0) s0 fN sN
))
val va_lemma_empty_total (s0:va_state) (bN:va_codes) : Ghost (va_state & va_fuel)
(requires True)
(ensures (fun (sM, fM) ->
s0 == sM /\
eval_code (va_Block []) s0 fM sM
))
val va_lemma_ifElse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) : Ghost (bool & va_state & va_state & va_fuel)
(requires True)
(ensures (fun (cond, sM, sN, f0) ->
cond == eval_ocmp s0 ifb /\
sM == {s0 with vs_flags = havoc_flags}
))
val va_lemma_ifElseTrue_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
eval_ocmp s0 ifb /\
eval_code ct ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
)
val va_lemma_ifElseFalse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
not (eval_ocmp s0 ifb) /\
eval_code cf ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
) | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Vale.X64.Decls.ocmp ->
c: Vale.X64.Decls.va_code ->
s0: Vale.X64.Decls.va_state ->
sN: Vale.X64.Decls.va_state ->
f0: Vale.X64.Decls.va_fuel
-> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Vale.X64.Decls.va_fuel",
"Prims.l_and",
"Vale.X64.Decls.eval_while_inv",
"Vale.X64.Machine_s.While",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.state_inv",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let va_whileInv_total (b: ocmp) (c: va_code) (s0 sN: va_state) (f0: va_fuel) : prop0 =
| eval_while_inv (While b c) s0 f0 sN /\ state_inv s0 | false |
Vale.X64.Decls.fsti | Vale.X64.Decls.loc_locs_disjoint_rec128 | val loc_locs_disjoint_rec128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 | val loc_locs_disjoint_rec128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 | let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 86,
"end_line": 466,
"start_col": 0,
"start_line": 463
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer (#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2] | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | l: Vale.X64.Memory.buffer128 -> ls: Prims.list Vale.X64.Memory.buffer128 -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Memory.buffer128",
"Prims.list",
"Prims.l_True",
"Prims.l_and",
"Vale.X64.Decls.locs_disjoint",
"Prims.Cons",
"Vale.X64.Memory.loc",
"Vale.X64.Decls.loc_buffer",
"Vale.X64.Memory.vuint128",
"Prims.Nil",
"Vale.X64.Decls.loc_locs_disjoint_rec128",
"Vale.Def.Prop_s.prop0"
] | [
"recursion"
] | false | false | false | true | false | let rec loc_locs_disjoint_rec128 (l: M.buffer128) (ls: list (M.buffer128)) : prop0 =
| match ls with
| [] -> True
| h :: t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t | false |
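Aside (illustrative, not part of the dataset record above): a minimal usage sketch of the disjointness helpers this record defines, assuming the same Vale.X64.Decls context (M = Vale.X64.Memory); the predicate name three_bufs_ok is made up for illustration.
// buffer_disjoints128 normalizes loc_locs_disjoint_rec128 into pairwise locs_disjoint facts,
// so a caller can state that b0 is disjoint from both b1 and b2 in a single conjunct.
let three_bufs_ok (h:vale_heap) (b0 b1 b2:M.buffer128) : prop0 =
  buffer_readable h b0 /\ buffer_readable h b1 /\ buffer_readable h b2 /\
  buffer_disjoints128 b0 [b1; b2]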
Vale.X64.Decls.fsti | Vale.X64.Decls.max_one_mem | val max_one_mem (o1 o2: operand64) : prop0 | val max_one_mem (o1 o2: operand64) : prop0 | let max_one_mem (o1 o2:operand64) : prop0 =
match (o1, o2) with
| (OMem _, OMem _) | (OMem _, OStack _) | (OStack _, OMem _) | (OStack _, OStack _) -> False
| _ -> True | {
"file_name": "vale/code/arch/x64/Vale.X64.Decls.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 13,
"end_line": 658,
"start_col": 0,
"start_line": 655
} | module Vale.X64.Decls
open FStar.Mul
open Vale.Arch.HeapTypes_s
open Vale.Arch.HeapImpl
module M = Vale.X64.Memory
module S = Vale.X64.Stack_i
module Map16 = Vale.Lib.Map16
// This interface should hide all of Machine_Semantics_s.
// (It should not refer to Machine_Semantics_s, directly or indirectly.)
// It should not refer to StateLemmas, Lemmas, or Print_s,
// because they refer to Machine_Semantics_s.
// Stack_i, Memory, Regs, Flags and State are ok, because they do not refer to Machine_Semantics_s.
open Vale.Def.Prop_s
open Vale.X64.Machine_s
open Vale.X64.State
open Vale.Def.Types_s
unfold let vale_heap = M.vale_heap
unfold let vale_full_heap = M.vale_full_heap
unfold let heaplet_id = M.heaplet_id
unfold let quad32 = quad32
val cf (flags:Flags.t) : bool
val overflow (flags:Flags.t) : bool
val valid_cf (flags:Flags.t) : bool
val valid_of (flags:Flags.t) : bool
val updated_cf (new_flags:Flags.t) (new_cf:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == new_cf /\ valid_cf new_flags)
val updated_of (new_flags:Flags.t) (new_of:bool) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == new_of /\ valid_of new_flags)
val maintained_cf (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> cf new_flags == cf flags /\ valid_cf new_flags == valid_cf flags)
val maintained_of (new_flags:Flags.t) (flags:Flags.t) : Pure bool
(requires True)
(ensures fun b -> b <==> overflow new_flags == overflow flags /\ valid_of new_flags == valid_of flags)
//unfold let va_subscript = Map.sel
unfold let va_subscript (#a:eqtype) (#b:Type) (x:Map.t a b) (y:a) : Tot b = Map.sel x y
unfold let va_update = Map.upd
unfold let va_hd = Cons?.hd
//unfold let va_tl = Cons?.tl // F* inlines "let ... = va_tl ..." more than we'd like; revised definition below suppresses this
// REVIEW: reveal_opaque doesn't include zeta, so it fails for recursive functions
[@va_qattr] unfold let va_reveal_eq (#ax:Type) (s:string) (x x':ax) = norm [zeta; delta_only [s]] #ax x == x'
let va_reveal_opaque (s:string) = norm_spec [zeta; delta_only [s]]
// hide 'if' so that x and y get fully normalized
let va_if (#a:Type) (b:bool) (x:(_:unit{b}) -> GTot a) (y:(_:unit{~b}) -> GTot a) : GTot a =
if b then x () else y ()
let total_if (#a:Type) (b:bool) (x y:a) : a =
if b then x else y
let total_thunk_if (#a:Type) (b:bool) (x:(_:unit{b}) -> a) (y:(_:unit{~b}) -> a) : a =
if b then x () else y ()
(* Type aliases *)
let va_int_at_least (k:int) = i:int{i >= k}
let va_int_at_most (k:int) = i:int{i <= k}
let va_int_range (k1 k2:int) = i:int{k1 <= i /\ i <= k2}
val ins : Type0
val ocmp : eqtype
unfold let va_code = precode ins ocmp
unfold let va_codes = list va_code
let va_tl (cs:va_codes) : Ghost va_codes (requires Cons? cs) (ensures fun tl -> tl == Cons?.tl cs) = Cons?.tl cs
unfold let va_state = vale_state
val va_fuel : Type0
unfold let va_operand_opr64 = operand64
let reg_operand = o:operand64{OReg? o}
let va_operand_reg_opr64 = o:operand64{OReg? o}
unfold let va_operand_dst_opr64 = operand64
unfold let va_operand_shift_amt64 = operand64
unfold let cmp_operand = o:operand64{not (OMem? o)}
unfold let va_operand_xmm = reg_xmm
unfold let va_operand_opr128 = operand128
unfold let va_operand_heaplet = heaplet_id
val va_pbool : Type0
val va_ttrue (_:unit) : va_pbool
val va_ffalse (reason:string) : va_pbool
val va_pbool_and (x y:va_pbool) : va_pbool
val get_reason (p:va_pbool) : option string
noeq
type va_transformation_result = {
success : va_pbool;
result : va_code;
}
unfold let va_get_success (r:va_transformation_result) : va_pbool = r.success
unfold let va_get_result (r:va_transformation_result) : va_code = r.result
val mul_nat_helper (x y:nat) : Lemma (x * y >= 0)
[@va_qattr] unfold let va_mul_nat (x y:nat) : nat =
mul_nat_helper x y;
x * y
[@va_qattr] unfold let va_expand_state (s:vale_state) : vale_state = state_eta s
unfold let get_reg (o:reg_operand) : reg = Reg 0 (OReg?.r o)
unfold let buffer_readable (#t:M.base_typ) (h:vale_heap) (b:M.buffer t) : GTot prop0 = M.buffer_readable #t h b
unfold let buffer_writeable (#t:M.base_typ) (b:M.buffer t) : GTot prop0 = M.buffer_writeable #t b
unfold let buffer_length (#t:M.base_typ) (b:M.buffer t) = M.buffer_length #t b
unfold let buffer8_as_seq (m:vale_heap) (b:M.buffer8) : GTot (Seq.seq nat8) = M.buffer_as_seq m b
unfold let buffer64_as_seq (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = M.buffer_as_seq m b
unfold let s64 (m:vale_heap) (b:M.buffer64) : GTot (Seq.seq nat64) = buffer64_as_seq m b
unfold let buffer128_as_seq (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = M.buffer_as_seq m b
unfold let s128 (m:vale_heap) (b:M.buffer128) : GTot (Seq.seq quad32) = buffer128_as_seq m b
unfold let valid_src_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_read m b i
unfold let valid_dst_addr (#t:M.base_typ) (m:vale_heap) (b:M.buffer t) (i:int) : prop0 = M.valid_buffer_write m b i
unfold let buffer64_read (b:M.buffer64) (i:int) (h:vale_heap) : GTot nat64 = M.buffer_read b i h
unfold let buffer128_read (b:M.buffer128) (i:int) (h:vale_heap) : GTot quad32 = M.buffer_read b i h
unfold let modifies_mem (s:M.loc) (h1 h2:vale_heap) : GTot prop0 = M.modifies s h1 h2
unfold let loc_buffer(#t:M.base_typ) (b:M.buffer t) = M.loc_buffer #t b
unfold let locs_disjoint = M.locs_disjoint
unfold let loc_union = M.loc_union
let valid_buf_maddr64 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer64) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf64 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 8 * index
let valid_buf_maddr128 (addr:int) (s_mem:vale_heap) (layout:vale_heap_layout) (b:M.buffer128) (index:int) (t:taint) : prop0 =
valid_src_addr s_mem b index /\
M.valid_taint_buf128 b s_mem layout.vl_taint t /\
addr == M.buffer_addr b s_mem + 16 * index
let valid_mem_operand64 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer64) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr64 addr s_mem layout b index t
let valid_mem_operand128 (addr:int) (t:taint) (s_mem:vale_heap) (layout:vale_heap_layout) : prop0 =
exists (b:M.buffer128) (index:int).{:pattern (M.valid_buffer_read s_mem b index)}
valid_buf_maddr128 addr s_mem layout b index t
[@va_qattr]
let valid_operand (o:operand64) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand o s /\
( match o with
| OMem (m, t) -> valid_mem_operand64 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack64 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
[@va_qattr]
let valid_operand128 (o:operand128) (s:vale_state) : prop0 =
Vale.X64.State.valid_src_operand128 o s /\
( match o with
| OMem (m, t) -> valid_mem_operand128 (eval_maddr m s) t (M.get_vale_heap s.vs_heap) s.vs_heap.vf_layout
| OStack (m, t) -> S.valid_taint_stack128 (eval_maddr m s) t s.vs_stackTaint
| _ -> True
)
(* Constructors *)
val va_fuel_default : unit -> va_fuel
[@va_qattr] unfold let va_op_xmm_xmm (x:reg_xmm) : va_operand_xmm = x
[@va_qattr] unfold let va_op_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_reg64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_opr128_xmm (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_const_opr64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_const_shift_amt64 (n:nat64) : operand64 = OConst n
[@va_qattr] unfold let va_op_shift_amt64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_op_cmp_reg64 (r:reg_64) : cmp_operand = OReg r
[@va_qattr] unfold let va_const_cmp (n:nat64) : cmp_operand = OConst n
[@va_qattr] unfold let va_coerce_reg64_opr64_to_cmp (r:va_operand_reg_opr64) : cmp_operand = r
[@va_qattr] unfold let va_coerce_reg_opr64_to_dst_opr64 (o:va_operand_reg_opr64) : va_operand_dst_opr64 = o
[@va_qattr] unfold let va_coerce_reg_opr64_to_opr64 (o:va_operand_reg_opr64) : va_operand_opr64 = o
[@va_qattr] unfold let va_coerce_opr64_to_cmp (o:operand64{not (OMem? o)}) : cmp_operand = o
[@va_qattr] unfold let va_op_reg_opr64_reg64 (r:reg_64) : reg_operand = OReg r
[@va_qattr] unfold let va_op_dst_opr64_reg64 (r:reg_64) : operand64 = OReg r
[@va_qattr] unfold let va_coerce_dst_opr64_to_opr64 (o:operand64) : operand64 = o
[@va_qattr] unfold let va_coerce_xmm_to_opr128 (x:reg_xmm) : operand128 = OReg x
[@va_qattr] unfold let va_op_heaplet_mem_heaplet (h:heaplet_id) : heaplet_id = h
[@va_qattr]
unfold let va_opr_code_Mem64 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OMem (MConst (n + offset), t)
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Stack (o:operand64) (offset:int) (t:taint) : operand64 =
match o with
| OConst n -> OStack (MConst (n + offset), t)
| OReg r -> OStack (MReg (Reg 0 r) offset, t)
| _ -> OStack (MConst 42, t)
[@va_qattr]
unfold let va_opr_code_Mem128 (h:heaplet_id) (o:operand64) (offset:int) (t:taint) : operand128 =
match o with
| OReg r -> OMem (MReg (Reg 0 r) offset, t)
| _ -> OMem (MConst 42, t)
val taint_at (memTaint:M.memtaint) (addr:int) : taint
(* Getters *)
[@va_qattr] unfold let va_get_ok (s:va_state) : bool = s.vs_ok
[@va_qattr] unfold let va_get_flags (s:va_state) : Flags.t = s.vs_flags
[@va_qattr] unfold let va_get_reg64 (r:reg_64) (s:va_state) : nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_get_xmm (x:reg_xmm) (s:va_state) : quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_get_mem (s:va_state) : vale_heap = M.get_vale_heap s.vs_heap
[@va_qattr] unfold let va_get_mem_layout (s:va_state) : vale_heap_layout = s.vs_heap.vf_layout
[@va_qattr] unfold let va_get_mem_heaplet (n:heaplet_id) (s:va_state) : vale_heap = Map16.sel s.vs_heap.vf_heaplets n
[@va_qattr] unfold let va_get_stack (s:va_state) : S.vale_stack = s.vs_stack
[@va_qattr] unfold let va_get_stackTaint (s:va_state) : M.memtaint = s.vs_stackTaint
[@va_qattr] let va_upd_ok (ok:bool) (s:vale_state) : vale_state = { s with vs_ok = ok }
[@va_qattr] let va_upd_flags (flags:Flags.t) (s:vale_state) : vale_state = { s with vs_flags = flags }
[@va_qattr] let upd_register (r:reg) (v:t_reg r) (s:vale_state) : vale_state = update_reg r v s
[@va_qattr] let va_upd_reg64 (r:reg_64) (v:nat64) (s:vale_state) : vale_state = update_reg_64 r v s
[@va_qattr] let va_upd_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state = update_reg_xmm x v s
[@va_qattr] let va_upd_mem (mem:vale_heap) (s:vale_state) : vale_state = { s with vs_heap = M.set_vale_heap s.vs_heap mem }
[@va_qattr] let va_upd_mem_layout (layout:vale_heap_layout) (s:vale_state) : vale_state = { s with vs_heap = { s.vs_heap with vf_layout = layout } }
[@va_qattr] let va_upd_mem_heaplet (n:heaplet_id) (h:vale_heap) (s:vale_state) : vale_state =
{ s with vs_heap = { s.vs_heap with vf_heaplets = Map16.upd s.vs_heap.vf_heaplets n h } }
[@va_qattr] let va_upd_stack (stack:S.vale_stack) (s:vale_state) : vale_state = { s with vs_stack = stack }
[@va_qattr] let va_upd_stackTaint (stackTaint:M.memtaint) (s:vale_state) : vale_state = { s with vs_stackTaint = stackTaint }
(* Evaluation *)
[@va_qattr] unfold let va_eval_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_dst_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_shift_amt64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_cmp_uint64 (s:va_state) (r:cmp_operand) : GTot nat64 = eval_operand r s
//[@va_qattr] unfold let va_eval_reg64 (s:va_state) (r:va_register) : GTot nat64 = eval_reg_64 r s
[@va_qattr] unfold let va_eval_reg_opr64 (s:va_state) (o:operand64) : GTot nat64 = eval_operand o s
[@va_qattr] unfold let va_eval_xmm (s:va_state) (x:reg_xmm) : GTot quad32 = eval_reg_xmm x s
[@va_qattr] unfold let va_eval_opr128 (s:va_state) (o:operand128) : GTot quad32 = eval_operand128 o s
[@va_qattr] unfold let va_eval_heaplet (s:va_state) (h:heaplet_id) : vale_heap = va_get_mem_heaplet h s
(* Predicates *)
[@va_qattr] unfold let va_is_src_opr64 (o:operand64) (s:va_state) = valid_operand o s
[@va_qattr] let va_is_dst_opr64 (o:operand64) (s:va_state) = match o with OReg r -> not (r = rRsp ) | _ -> false
[@va_qattr] unfold let va_is_dst_dst_opr64 (o:operand64) (s:va_state) = va_is_dst_opr64 o s
[@va_qattr] unfold let va_is_src_shift_amt64 (o:operand64) (s:va_state) = valid_operand o s /\ (va_eval_shift_amt64 s o) < 64
[@va_qattr] unfold let va_is_src_reg_opr64 (o:operand64) (s:va_state) = OReg? o
[@va_qattr] unfold let va_is_dst_reg_opr64 (o:operand64) (s:va_state) = OReg? o /\ not (rRsp = (OReg?.r o))
[@va_qattr] unfold let va_is_src_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_xmm (x:reg_xmm) (s:va_state) = True
[@va_qattr] unfold let va_is_src_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_dst_opr128 (o:operand128) (s:va_state) = valid_operand128 o s
[@va_qattr] unfold let va_is_src_heaplet (h:heaplet_id) (s:va_state) = True
[@va_qattr] unfold let va_is_dst_heaplet (h:heaplet_id) (s:va_state) = True
(* Framing: va_update_foo means the two states are the same except for foo *)
[@va_qattr] unfold let va_update_ok (sM:va_state) (sK:va_state) : va_state = va_upd_ok sM.vs_ok sK
[@va_qattr] unfold let va_update_flags (sM:va_state) (sK:va_state) : va_state = va_upd_flags sM.vs_flags sK
[@va_qattr] unfold let update_register (r:reg) (sM:va_state) (sK:va_state) : va_state =
upd_register r (eval_reg r sM) sK
[@va_qattr] unfold let va_update_reg64 (r:reg_64) (sM:va_state) (sK:va_state) : va_state =
va_upd_reg64 r (eval_reg_64 r sM) sK
[@va_qattr] unfold let va_update_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
va_upd_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold let va_update_mem (sM:va_state) (sK:va_state) : va_state = va_upd_mem sM.vs_heap.vf_heap sK
[@va_qattr] unfold let va_update_mem_layout (sM:va_state) (sK:va_state) : va_state = va_upd_mem_layout sM.vs_heap.vf_layout sK
[@va_qattr] unfold let va_update_mem_heaplet (n:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_upd_mem_heaplet n (Map16.sel sM.vs_heap.vf_heaplets n) sK
[@va_qattr] unfold let va_update_stack (sM:va_state) (sK:va_state) : va_state = va_upd_stack sM.vs_stack sK
[@va_qattr] unfold let va_update_stackTaint (sM:va_state) (sK:va_state) : va_state = va_upd_stackTaint sM.vs_stackTaint sK
[@va_qattr]
let update_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
match o with
| OConst n -> sK
| OReg r -> va_update_reg64 r sM sK
| OMem (m, _) -> va_update_mem sM sK
| OStack (m, _) -> va_update_stack sM sK
[@va_qattr] unfold
let update_dst_operand (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_operand o sM sK
[@va_qattr] unfold
let va_update_operand_dst_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_reg_opr64 (o:operand64) (sM:va_state) (sK:va_state) : va_state =
update_dst_operand o sM sK
[@va_qattr] unfold
let va_update_operand_xmm (x:reg_xmm) (sM:va_state) (sK:va_state) : va_state =
update_reg_xmm x (eval_reg_xmm x sM) sK
[@va_qattr] unfold
let va_update_operand_heaplet (h:heaplet_id) (sM:va_state) (sK:va_state) : va_state =
va_update_mem_heaplet h sM sK
unfold let va_value_opr64 = nat64
unfold let va_value_dst_opr64 = nat64
unfold let va_value_reg_opr64 = nat64
unfold let va_value_xmm = quad32
unfold let va_value_heaplet = vale_heap
[@va_qattr]
let va_upd_operand_xmm (x:reg_xmm) (v:quad32) (s:vale_state) : vale_state =
update_reg_xmm x v s
[@va_qattr]
let va_upd_operand_dst_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s // TODO: support destination memory operands
| OStack (m, _) -> s // TODO: support destination stack operands
[@va_qattr]
let va_upd_operand_reg_opr64 (o:operand64) (v:nat64) (s:vale_state) =
match o with
| OConst n -> s
| OReg r -> update_reg_64 r v s
| OMem (m, _) -> s
| OStack (m, _) -> s
[@va_qattr]
unfold let va_upd_operand_heaplet (h:heaplet_id) (v:vale_heap) (s:va_state) : va_state = va_upd_mem_heaplet h v s
let va_lemma_upd_update (sM:vale_state) : Lemma
(
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_dst_opr64 o sM sK)} va_is_dst_dst_opr64 o sK ==> va_update_operand_dst_opr64 o sM sK == va_upd_operand_dst_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (o:operand64).{:pattern (va_update_operand_reg_opr64 o sM sK)} va_is_dst_reg_opr64 o sK ==> va_update_operand_reg_opr64 o sM sK == va_upd_operand_reg_opr64 o (eval_operand o sM) sK) /\
(forall (sK:vale_state) (x:reg_xmm).{:pattern (va_update_operand_xmm x sM sK)} va_update_operand_xmm x sM sK == va_upd_operand_xmm x (eval_reg_xmm x sM) sK)
)
= ()
(** Constructors for va_codes *)
[@va_qattr] unfold let va_CNil () : va_codes = []
[@va_qattr] unfold let va_CCons (hd:va_code) (tl:va_codes) : va_codes = hd::tl
(** Constructors for va_code *)
unfold let va_Block (block:va_codes) : va_code = Block block
unfold let va_IfElse (ifCond:ocmp) (ifTrue:va_code) (ifFalse:va_code) : va_code = IfElse ifCond ifTrue ifFalse
unfold let va_While (whileCond:ocmp) (whileBody:va_code) : va_code = While whileCond whileBody
val va_cmp_eq (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ne (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_le (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_ge (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_lt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
val va_cmp_gt (o1:operand64{ not (OMem? o1 || OStack? o1) }) (o2:operand64{ not (OMem? o2 || OStack? o2) }) : ocmp
unfold let va_get_block (c:va_code{Block? c}) : va_codes = Block?.block c
unfold let va_get_ifCond (c:va_code{IfElse? c}) : ocmp = IfElse?.ifCond c
unfold let va_get_ifTrue (c:va_code{IfElse? c}) : va_code = IfElse?.ifTrue c
unfold let va_get_ifFalse (c:va_code{IfElse? c}) : va_code = IfElse?.ifFalse c
unfold let va_get_whileCond (c:va_code{While? c}) : ocmp = While?.whileCond c
unfold let va_get_whileBody (c:va_code{While? c}) : va_code = While?.whileBody c
(** Map syntax **)
//unfold let (.[]) (m:vale_heap) (b:M.buffer64) = fun index -> buffer64_read b index m
// syntax for map accesses, m.[key] and m.[key] <- value
(*
type map (key:eqtype) (value:Type) = Map.t key value
unfold let (.[]) = Map.sel
unfold let (.[]<-) = Map.upd
*)
(** Memory framing **)
(*
unfold let in_mem (addr:int) (m:mem) : bool = m `Map.contains` addr
let disjoint (ptr1:int) (num_bytes1:int) (ptr2:int) (num_bytes2:int) =
ptr1 + num_bytes1 <= ptr2 \/ ptr2 + num_bytes2 <= ptr1
let validSrcAddrs (mem:mem) (addr:int) (size:int) (num_bytes:int) =
size == 64 /\
(forall (a:int) . {:pattern (mem `Map.contains` a)} addr <= a && a < addr+num_bytes && (a - addr) % 8 = 0 ==> mem `Map.contains` a)
let memModified (old_mem:mem) (new_mem:mem) (ptr:int) (num_bytes) =
(forall (a:int) . {:pattern (new_mem `Map.contains` a)} old_mem `Map.contains` a <==> new_mem `Map.contains` a) /\
(forall (a:int) . {:pattern (new_mem.[a]) \/ Map.sel new_mem a} a < ptr || a >= ptr + num_bytes ==> old_mem.[a] == new_mem.[ a])
*)
(** Convenient memory-related functions **)
let rec buffers_readable (h: vale_heap) (l: list M.buffer64) : GTot prop0 (decreases l) =
match l with
| [] -> True
| b :: l' -> buffer_readable h b /\ buffers_readable h l'
unfold let modifies_buffer (b:M.buffer64) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer_2 (b1 b2:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer_3 (b1 b2 b3:M.buffer64) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
unfold let modifies_buffer128 (b:M.buffer128) (h1 h2:vale_heap) = modifies_mem (loc_buffer b) h1 h2
unfold let modifies_buffer128_2 (b1 b2:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (loc_buffer b2)) h1 h2
unfold let modifies_buffer128_3 (b1 b2 b3:M.buffer128) (h1 h2:vale_heap) =
modifies_mem (M.loc_union (loc_buffer b1) (M.loc_union (loc_buffer b2) (loc_buffer b3))) h1 h2
let validSrcAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
buffer_readable h b /\
len <= buffer_length b /\
M.buffer_addr b h == addr /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) false /\
M.valid_taint_buf b h layout.vl_taint tn
let validDstAddrs (#t:base_typ) (h:vale_heap) (addr:int) (b:M.buffer t) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn /\
M.valid_layout_buffer_id t b layout (M.get_heaplet_id h) true /\
buffer_writeable b
let validSrcAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs64 (h:vale_heap) (addr:int) (b:M.buffer64) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h addr b len layout tn
let validDstAddrs128 (h:vale_heap) (addr:int) (b:M.buffer128) (len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h addr b len layout tn
let validSrcAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validSrcAddrs h (addr - 16 * offset) b (len + offset) layout tn
let validDstAddrsOffset128 (h:vale_heap) (addr:int) (b:M.buffer128) (offset len:int) (layout:vale_heap_layout) (tn:taint) =
validDstAddrs h (addr - 16 * offset) b (len + offset) layout tn
let modifies_buffer_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer128 b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let buffer_modifies_specific128 (b:M.buffer128) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer128_read b i h1
== buffer128_read b i h2)
let modifies_buffer_specific (b:M.buffer64) (h1 h2:vale_heap) (start last:nat) : GTot prop0 =
modifies_buffer b h1 h2 /\
// TODO: Consider replacing this with: modifies (loc_buffer (gsub_buffer b i len)) h1 h2
(forall (i:nat) . {:pattern (Seq.index (M.buffer_as_seq h2 b) i)}
0 <= i /\ i < buffer_length b
/\ (i < start || i > last)
==> buffer64_read b i h1
== buffer64_read b i h2)
unfold let buffers_disjoint (b1 b2:M.buffer64) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
unfold let buffers_disjoint128 (b1 b2:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2]
let rec loc_locs_disjoint_rec128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
match ls with
| [] -> True
| h::t -> locs_disjoint [loc_buffer l; loc_buffer h] /\ loc_locs_disjoint_rec128 l t
unfold
let buffer_disjoints128 (l:M.buffer128) (ls:list (M.buffer128)) : prop0 =
norm [zeta; iota; delta_only [`%loc_locs_disjoint_rec128]] (loc_locs_disjoint_rec128 l ls)
unfold let buffers3_disjoint128 (b1 b2 b3:M.buffer128) =
locs_disjoint [loc_buffer b1; loc_buffer b2; loc_buffer b3]
val eval_code (c:va_code) (s0:va_state) (f0:va_fuel) (sN:va_state) : prop0
val eval_while_inv (c:va_code) (s0:va_state) (fW:va_fuel) (sW:va_state) : prop0
[@va_qattr]
let va_state_eq (s0:va_state) (s1:va_state) : prop0 = state_eq s0 s1
let state_inv (s:va_state) : prop0 = M.mem_inv s.vs_heap
let vale_state_with_inv = s:va_state{state_inv s}
let va_require_total (c0:va_code) (c1:va_code) (s0:va_state) : prop0 =
c0 == c1 /\ state_inv s0
let va_ensure_total (c0:va_code) (s0:va_state) (s1:va_state) (f1:va_fuel) : prop0 =
eval_code c0 s0 f1 s1 /\ state_inv s1
val eval_ocmp : s:va_state -> c:ocmp -> GTot bool
unfold let va_evalCond (b:ocmp) (s:va_state) : GTot bool = eval_ocmp s b
val valid_ocmp : c:ocmp -> s:va_state -> GTot bool
val havoc_flags : Flags.t
val lemma_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_eq o1 o2)) <==> (va_eval_opr64 s o1 == va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_eq o1 o2))]
val lemma_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ne o1 o2)) <==> (va_eval_opr64 s o1 <> va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ne o1 o2))]
val lemma_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_le o1 o2)) <==> (va_eval_opr64 s o1 <= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_le o1 o2))]
val lemma_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_ge o1 o2)) <==> (va_eval_opr64 s o1 >= va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_ge o1 o2))]
val lemma_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_lt o1 o2)) <==> (va_eval_opr64 s o1 < va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_lt o1 o2))]
val lemma_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (eval_ocmp s (va_cmp_gt o1 o2)) <==> (va_eval_opr64 s o1 > va_eval_opr64 s o2))
[SMTPat (eval_ocmp s (va_cmp_gt o1 o2))]
val lemma_valid_cmp_eq : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_eq o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_eq o1 o2) s)]
val lemma_valid_cmp_ne : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ne o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ne o1 o2) s)]
val lemma_valid_cmp_le : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_le o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_le o1 o2) s)]
val lemma_valid_cmp_ge : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_ge o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_ge o1 o2) s)]
val lemma_valid_cmp_lt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_lt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_lt o1 o2) s)]
val lemma_valid_cmp_gt : s:va_state -> o1:operand64{ not (OMem? o1 || OStack? o1) } -> o2:operand64{ not (OMem? o2 || OStack? o2) } -> Lemma
(requires True)
(ensures (valid_operand o1 s /\ valid_operand o2 s) ==> (valid_ocmp (va_cmp_gt o1 o2) s))
[SMTPat (valid_ocmp (va_cmp_gt o1 o2) s)]
val va_compute_merge_total (f0:va_fuel) (fM:va_fuel) : va_fuel
val va_lemma_merge_total (b0:va_codes) (s0:va_state) (f0:va_fuel) (sM:va_state) (fM:va_fuel) (sN:va_state) : Ghost va_fuel
(requires
Cons? b0 /\
eval_code (Cons?.hd b0) s0 f0 sM /\
eval_code (va_Block (Cons?.tl b0)) sM fM sN
)
(ensures (fun fN ->
fN == va_compute_merge_total f0 fM /\
eval_code (va_Block b0) s0 fN sN
))
val va_lemma_empty_total (s0:va_state) (bN:va_codes) : Ghost (va_state & va_fuel)
(requires True)
(ensures (fun (sM, fM) ->
s0 == sM /\
eval_code (va_Block []) s0 fM sM
))
val va_lemma_ifElse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) : Ghost (bool & va_state & va_state & va_fuel)
(requires True)
(ensures (fun (cond, sM, sN, f0) ->
cond == eval_ocmp s0 ifb /\
sM == {s0 with vs_flags = havoc_flags}
))
val va_lemma_ifElseTrue_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
eval_ocmp s0 ifb /\
eval_code ct ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
)
val va_lemma_ifElseFalse_total (ifb:ocmp) (ct:va_code) (cf:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) : Lemma
(requires
valid_ocmp ifb s0 /\
not (eval_ocmp s0 ifb) /\
eval_code cf ({s0 with vs_flags = havoc_flags}) f0 sM
)
(ensures
eval_code (IfElse ifb ct cf) s0 f0 sM
)
let va_whileInv_total (b:ocmp) (c:va_code) (s0:va_state) (sN:va_state) (f0:va_fuel) : prop0 =
eval_while_inv (While b c) s0 f0 sN /\ state_inv s0
val va_lemma_while_total (b:ocmp) (c:va_code) (s0:va_state) : Ghost (va_state & va_fuel)
(requires True)
(ensures fun (s1, f1) ->
s1 == s0 /\
eval_while_inv (While b c) s1 f1 s1
)
val va_lemma_whileTrue_total (b:ocmp) (c:va_code) (s0:va_state) (sW:va_state) (fW:va_fuel) : Ghost (va_state & va_fuel)
(requires eval_ocmp sW b /\ valid_ocmp b sW)
(ensures fun (s1, f1) -> s1 == {sW with vs_flags = havoc_flags} /\ f1 == fW)
val va_lemma_whileFalse_total (b:ocmp) (c:va_code) (s0:va_state) (sW:va_state) (fW:va_fuel) : Ghost (va_state & va_fuel)
(requires
valid_ocmp b sW /\
not (eval_ocmp sW b) /\
eval_while_inv (While b c) s0 fW sW
)
(ensures fun (s1, f1) ->
s1 == {sW with vs_flags = havoc_flags} /\
eval_code (While b c) s0 f1 s1
)
val va_lemma_whileMerge_total (c:va_code) (s0:va_state) (f0:va_fuel) (sM:va_state) (fM:va_fuel) (sN:va_state) : Ghost va_fuel
(requires While? c /\ (
let cond = While?.whileCond c in
sN.vs_ok /\
valid_ocmp cond sM /\
eval_ocmp sM cond /\
eval_while_inv c s0 f0 sM /\
eval_code (While?.whileBody c) ({sM with vs_flags = havoc_flags}) fM sN
))
(ensures (fun fN ->
eval_while_inv c s0 fN sN
))
val printer : Type0
val print_string : string -> FStar.All.ML unit
val print_header : printer -> FStar.All.ML unit
val print_proc : (name:string) -> (code:va_code) -> (label:int) -> (p:printer) -> FStar.All.ML int
val print_footer : printer -> FStar.All.ML unit
val masm : printer
val gcc : printer
val gcc_linux : printer
unfold let memTaint_type = Map.t int taint | {
"checked_file": "/",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.Stack_i.fsti.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.HeapTypes_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.All.fst.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Decls.fsti"
} | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Stack_i",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.X64.Memory",
"short_module": "M"
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapTypes_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | o1: Vale.X64.Machine_s.operand64 -> o2: Vale.X64.Machine_s.operand64 -> Vale.Def.Prop_s.prop0 | Prims.Tot | [
"total"
] | [] | [
"Vale.X64.Machine_s.operand64",
"FStar.Pervasives.Native.Mktuple2",
"Vale.X64.Machine_s.operand",
"Vale.X64.Machine_s.nat64",
"Vale.X64.Machine_s.reg_64",
"Vale.X64.Machine_s.tmaddr",
"Prims.l_False",
"FStar.Pervasives.Native.tuple2",
"Prims.l_True",
"Vale.Def.Prop_s.prop0"
] | [] | false | false | false | true | false | let max_one_mem (o1 o2: operand64) : prop0 =
| match (o1, o2) with
| OMem _, OMem _ | OMem _, OStack _ | OStack _, OMem _ | OStack _, OStack _ -> False
| _ -> True | false |
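Aside (illustrative, not part of the dataset record above): max_one_mem is True unless both operands are OMem/OStack, i.e. it encodes the usual x64 restriction that an instruction may take at most one memory or stack operand. A hypothetical precondition using it, assuming the same Vale.X64.Decls context (the name mov_operands_ok is made up), could read:
// dst must be a register; OMem/OStack may appear at most once between dst and src.
let mov_operands_ok (dst src:operand64) : prop0 =
  OReg? dst /\ max_one_mem dst src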
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.block_v | val block_v : w: Hacl.Spec.Poly1305.Vec.lanes{w * Hacl.Spec.Poly1305.Vec.size_block <= Lib.IntTypes.max_size_t}
-> Type0 | let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block) | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 77,
"end_line": 19,
"start_col": 0,
"start_line": 19
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | w: Hacl.Spec.Poly1305.Vec.lanes{w * Hacl.Spec.Poly1305.Vec.size_block <= Lib.IntTypes.max_size_t}
-> Type0 | Prims.Tot | [
"total"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Hacl.Spec.Poly1305.Vec.size_block",
"Lib.IntTypes.max_size_t",
"Lib.ByteSequence.lbytes"
] | [] | false | false | false | false | true | let block_v (w: lanes{w * size_block <= max_size_t}) =
| lbytes (w * size_block) | false |
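Aside (illustrative, not part of the dataset record above): block_v w is just a byte sequence holding w Poly1305 blocks (w * 16 bytes), and the lemmas in this file consume it by cutting it into single blocks with Seq.slice. A small sketch under the same module context (the helper name split2 is made up):
// For w = 2 the vector block is 32 bytes; slice it into its two 16-byte scalar blocks.
let split2 (b:block_v 2) =
  (Seq.slice b 0 size_block, Seq.slice b size_block (2 * size_block))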
|
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly1305_update_vec_lemma | val poly1305_update_vec_lemma: #w:lanes -> text:bytes -> acc0:pfelem -> r:pfelem ->
Lemma (poly1305_update #w text acc0 r == S.poly1305_update text acc0 r) | val poly1305_update_vec_lemma: #w:lanes -> text:bytes -> acc0:pfelem -> r:pfelem ->
Lemma (poly1305_update #w text acc0 r == S.poly1305_update text acc0 r) | let poly1305_update_vec_lemma #w text acc0 r =
let len = length text in
let blocksize_v = w * size_block in
let len0 = len / blocksize_v * blocksize_v in
FStar.Math.Lemmas.cancel_mul_mod (len / blocksize_v) blocksize_v;
assert (len0 % blocksize_v = 0);
assert (len0 % size_block = 0);
let text0 = Seq.slice text 0 len0 in
let f = S.poly1305_update1 r size_block in
let l = S.poly1305_update_last r in
if len0 > 0 then begin
poly_update_multi_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_split size_block len0 text f l acc0 end | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 67,
"end_line": 271,
"start_col": 0,
"start_line": 257
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0
val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0)
let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
poly_update_nblocks_lemma #w r b acc_v0
val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0))
let poly_update_multi_lemma_v #w text acc_v0 r =
let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
Classical.forall_intro_2 (repeat_blocks_multi_vec_equiv_pre_lemma #w r);
VecLemmas.lemma_repeat_blocks_multi_vec w size_block text f f_v (normalize_n r) acc_v0
val poly_update_multi_lemma:
#w:lanes
-> text:bytes{w * size_block <= length text /\ length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc0:pfelem
-> r:pfelem -> Lemma
(poly1305_update_multi #w text acc0 r ==
repeat_blocks_multi size_block text (S.poly1305_update1 r size_block) acc0)
let poly_update_multi_lemma #w text acc0 r =
let len = length text in
let blocksize_v = w * size_block in
let text0 = Seq.slice text 0 blocksize_v in
let text1 = Seq.slice text blocksize_v len in
FStar.Math.Lemmas.modulo_addition_lemma len blocksize_v (- 1);
assert (length text1 % (w * size_block) = 0 /\ length text1 % size_block = 0);
let f = S.poly1305_update1 r size_block in
let acc_v0 = load_acc #w text0 acc0 in
let rp = poly1305_update_multi #w text acc0 r in
poly_update_multi_lemma_v #w text1 acc_v0 r;
load_acc_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_multi_split size_block blocksize_v text f acc0
val poly1305_update_vec_lemma: #w:lanes -> text:bytes -> acc0:pfelem -> r:pfelem ->
Lemma (poly1305_update #w text acc0 r == S.poly1305_update text acc0 r) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
text: Lib.ByteSequence.bytes ->
acc0: Hacl.Spec.Poly1305.Vec.pfelem ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Vec.poly1305_update text acc0 r ==
Spec.Poly1305.poly1305_update text acc0 r) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Lib.ByteSequence.bytes",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Prims.op_GreaterThan",
"Lib.Sequence.Lemmas.repeat_blocks_split",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Spec.Poly1305.felem",
"Hacl.Spec.Poly1305.Vec.size_block",
"Prims.unit",
"Hacl.Spec.Poly1305.Equiv.poly_update_multi_lemma",
"Prims.bool",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Subtraction",
"Prims.pow2",
"Prims.op_LessThan",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update_last",
"Spec.Poly1305.poly1305_update1",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.slice",
"Prims._assert",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"FStar.Math.Lemmas.cancel_mul_mod",
"Prims.op_Division",
"FStar.Mul.op_Star",
"Lib.Sequence.length"
] | [] | false | false | true | false | false | let poly1305_update_vec_lemma #w text acc0 r =
| let len = length text in
let blocksize_v = w * size_block in
let len0 = (len / blocksize_v) * blocksize_v in
FStar.Math.Lemmas.cancel_mul_mod (len / blocksize_v) blocksize_v;
assert (len0 % blocksize_v = 0);
assert (len0 % size_block = 0);
let text0 = Seq.slice text 0 len0 in
let f = S.poly1305_update1 r size_block in
let l = S.poly1305_update_last r in
if len0 > 0
then
(poly_update_multi_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_split size_block len0 text f l acc0) | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly1305_vec_lemma | val poly1305_vec_lemma: #w:lanes -> msg:bytes -> k:S.key ->
Lemma (poly1305_mac #w msg k == S.poly1305_mac msg k) | val poly1305_vec_lemma: #w:lanes -> msg:bytes -> k:S.key ->
Lemma (poly1305_mac #w msg k == S.poly1305_mac msg k) | let poly1305_vec_lemma #w msg k =
let acc0, r = S.poly1305_init k in
poly1305_update_vec_lemma #w msg acc0 r | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 41,
"end_line": 279,
"start_col": 0,
"start_line": 277
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0
val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0)
let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
poly_update_nblocks_lemma #w r b acc_v0
val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0))
let poly_update_multi_lemma_v #w text acc_v0 r =
let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
Classical.forall_intro_2 (repeat_blocks_multi_vec_equiv_pre_lemma #w r);
VecLemmas.lemma_repeat_blocks_multi_vec w size_block text f f_v (normalize_n r) acc_v0
val poly_update_multi_lemma:
#w:lanes
-> text:bytes{w * size_block <= length text /\ length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc0:pfelem
-> r:pfelem -> Lemma
(poly1305_update_multi #w text acc0 r ==
repeat_blocks_multi size_block text (S.poly1305_update1 r size_block) acc0)
let poly_update_multi_lemma #w text acc0 r =
let len = length text in
let blocksize_v = w * size_block in
let text0 = Seq.slice text 0 blocksize_v in
let text1 = Seq.slice text blocksize_v len in
FStar.Math.Lemmas.modulo_addition_lemma len blocksize_v (- 1);
assert (length text1 % (w * size_block) = 0 /\ length text1 % size_block = 0);
let f = S.poly1305_update1 r size_block in
let acc_v0 = load_acc #w text0 acc0 in
let rp = poly1305_update_multi #w text acc0 r in
poly_update_multi_lemma_v #w text1 acc_v0 r;
load_acc_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_multi_split size_block blocksize_v text f acc0
val poly1305_update_vec_lemma: #w:lanes -> text:bytes -> acc0:pfelem -> r:pfelem ->
Lemma (poly1305_update #w text acc0 r == S.poly1305_update text acc0 r)
let poly1305_update_vec_lemma #w text acc0 r =
let len = length text in
let blocksize_v = w * size_block in
let len0 = len / blocksize_v * blocksize_v in
FStar.Math.Lemmas.cancel_mul_mod (len / blocksize_v) blocksize_v;
assert (len0 % blocksize_v = 0);
assert (len0 % size_block = 0);
let text0 = Seq.slice text 0 len0 in
let f = S.poly1305_update1 r size_block in
let l = S.poly1305_update_last r in
if len0 > 0 then begin
poly_update_multi_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_split size_block len0 text f l acc0 end
val poly1305_vec_lemma: #w:lanes -> msg:bytes -> k:S.key ->
Lemma (poly1305_mac #w msg k == S.poly1305_mac msg k) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | msg: Lib.ByteSequence.bytes -> k: Spec.Poly1305.key
-> FStar.Pervasives.Lemma
(ensures Hacl.Spec.Poly1305.Vec.poly1305_mac msg k == Spec.Poly1305.poly1305_mac msg k) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Lib.ByteSequence.bytes",
"Spec.Poly1305.key",
"Spec.Poly1305.felem",
"Hacl.Spec.Poly1305.Equiv.poly1305_update_vec_lemma",
"Prims.unit",
"FStar.Pervasives.Native.tuple2",
"Spec.Poly1305.poly1305_init"
] | [] | false | false | true | false | false | let poly1305_vec_lemma #w msg k =
| let acc0, r = S.poly1305_init k in
poly1305_update_vec_lemma #w msg acc0 r | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.load_acc_lemma | val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 33,
"end_line": 103,
"start_col": 0,
"start_line": 99
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Hacl.Spec.Poly1305.Equiv.block_v w ->
acc0: Hacl.Spec.Poly1305.Vec.pfelem ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Vec.normalize_n r (Hacl.Spec.Poly1305.Vec.load_acc b acc0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
acc0) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Equiv.load_acc_lemma1",
"Hacl.Spec.Poly1305.Equiv.load_acc_lemma2",
"Hacl.Spec.Poly1305.Equiv.load_acc_lemma4",
"Prims.unit"
] | [] | false | false | true | false | false | let load_acc_lemma #w b acc0 r =
| match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.repeat_blocks_multi_vec_equiv_pre_lemma | val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0) | val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0) | let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
poly_update_nblocks_lemma #w r b acc_v0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 41,
"end_line": 206,
"start_col": 0,
"start_line": 205
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0
val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Hacl.Spec.Poly1305.Vec.pfelem ->
b: Hacl.Spec.Poly1305.Equiv.block_v w ->
acc_v0: Hacl.Spec.Poly1305.Vec.elem w
-> FStar.Pervasives.Lemma
(ensures
(let rw = Hacl.Spec.Poly1305.Vec.compute_rw r in
let f = Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block in
let f_v = Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks rw in
Lib.Vec.Lemmas.repeat_blocks_multi_vec_equiv_pre w
Hacl.Spec.Poly1305.Vec.size_block
f
f_v
(Hacl.Spec.Poly1305.Vec.normalize_n r)
b
acc_v0)) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.elem",
"Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma",
"Prims.unit"
] | [] | true | false | true | false | false | let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
| poly_update_nblocks_lemma #w r b acc_v0 | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.load_acc_lemma2 | val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 60,
"end_line": 60,
"start_col": 0,
"start_line": 44
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Hacl.Spec.Poly1305.Equiv.block_v 2 ->
acc0: Hacl.Spec.Poly1305.Vec.pfelem ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Vec.normalize_n r (Hacl.Spec.Poly1305.Vec.load_acc b acc0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
acc0) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Lemmas.poly_update_multi_lemma_load2_simplify",
"Prims.unit",
"Lib.LoopCombinators.eq_repeati0",
"Spec.Poly1305.felem",
"Lib.LoopCombinators.unfold_repeati",
"Lib.Sequence.lemma_repeat_blocks_multi",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Hacl.Spec.Poly1305.Vec.size_block",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.repeat_blocks_f",
"Prims.int",
"Prims.op_Division",
"FStar.Mul.op_Star",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update1",
"FStar.Math.Lemmas.modulo_lemma",
"Hacl.Spec.Poly1305.Vec.prime",
"Hacl.Spec.Poly1305.Vec.pfadd",
"Prims.pow2",
"Lib.ByteSequence.nat_from_bytes_le",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.slice"
] | [] | true | false | true | false | false | let load_acc_lemma2 b acc0 r =
| let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1 | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma | val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 46,
"end_line": 196,
"start_col": 0,
"start_line": 192
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Hacl.Spec.Poly1305.Vec.pfelem ->
b: Hacl.Spec.Poly1305.Equiv.block_v w ->
acc_v0: Hacl.Spec.Poly1305.Vec.elem w
-> FStar.Pervasives.Lemma
(ensures
(let rw = Hacl.Spec.Poly1305.Vec.compute_rw r in
Hacl.Spec.Poly1305.Vec.normalize_n r
(Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks rw b acc_v0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
(Hacl.Spec.Poly1305.Vec.normalize_n r acc_v0))) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.elem",
"Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma1",
"Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma2",
"Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma4",
"Prims.unit"
] | [] | false | false | true | false | false | let poly_update_nblocks_lemma #w r b acc_v0 =
| match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0 | false |
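The row above covers poly_update_nblocks_lemma, which dispatches the per-iteration equivalence to its w = 1, 2, 4 instances: one vectorized poly1305_update_nblocks step followed by normalize_n equals w scalar poly1305_update1 steps started from the normalized accumulator. Below is a minimal plain-Python sketch of the w = 2 instance of that algebraic identity over 2^130 - 5; the helpers update1, normalize2 and update_nblocks2 are ad-hoc stand-ins for the F* spec functions, not HACL* code.

p = (1 << 130) - 5                       # the Poly1305 prime

def update1(r, c, acc):                  # scalar step: (acc + c) * r mod p
    return ((acc + c) * r) % p

def normalize2(r, a):                    # normalize_n for w = 2: a0*r^2 + a1*r
    return (a[0] * r * r + a[1] * r) % p

def update_nblocks2(r2, c, a):           # vectorized step with rw = (r^2, r^2)
    return [(a[0] * r2 + c[0]) % p, (a[1] * r2 + c[1]) % p]

r, a = 0x12345, [11, 22]                 # arbitrary sample values
c = [(1 << 128) + 3, (1 << 128) + 7]     # two padded 16-byte blocks

lhs = normalize2(r, update_nblocks2((r * r) % p, c, a))
rhs = update1(r, c[1], update1(r, c[0], normalize2(r, a)))
assert lhs == rhs                        # both sides equal a0*r^4 + a1*r^3 + c0*r^2 + c1*r

Expanding either side gives a0*r^4 + a1*r^3 + c0*r^2 + c1*r, which is the simplification discharged by Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify in the w = 2 proof.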
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly_update_multi_lemma_v | val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0)) | val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0)) | let poly_update_multi_lemma_v #w text acc_v0 r =
let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
Classical.forall_intro_2 (repeat_blocks_multi_vec_equiv_pre_lemma #w r);
VecLemmas.lemma_repeat_blocks_multi_vec w size_block text f f_v (normalize_n r) acc_v0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 88,
"end_line": 226,
"start_col": 0,
"start_line": 220
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0
val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0)
let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
poly_update_nblocks_lemma #w r b acc_v0
val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0)) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
text:
Lib.ByteSequence.bytes
{ Lib.Sequence.length text % (w * Hacl.Spec.Poly1305.Vec.size_block) = 0 /\
Lib.Sequence.length text % Hacl.Spec.Poly1305.Vec.size_block = 0 } ->
acc_v0: Hacl.Spec.Poly1305.Vec.elem w ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
(let rw = Hacl.Spec.Poly1305.Vec.compute_rw r in
let f = Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block in
let f_v = Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks rw in
Hacl.Spec.Poly1305.Vec.normalize_n r
(Lib.Sequence.repeat_blocks_multi (w * Hacl.Spec.Poly1305.Vec.size_block) text f_v acc_v0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
text
f
(Hacl.Spec.Poly1305.Vec.normalize_n r acc_v0))) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Lib.ByteSequence.bytes",
"Prims.l_and",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"Lib.Sequence.length",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"FStar.Mul.op_Star",
"Hacl.Spec.Poly1305.Vec.size_block",
"Hacl.Spec.Poly1305.Vec.elem",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Lib.Vec.Lemmas.lemma_repeat_blocks_multi_vec",
"Spec.Poly1305.felem",
"Hacl.Spec.Poly1305.Vec.normalize_n",
"Prims.unit",
"FStar.Classical.forall_intro_2",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Lib.Vec.Lemmas.repeat_blocks_multi_vec_equiv_pre",
"Spec.Poly1305.poly1305_update1",
"Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks",
"Hacl.Spec.Poly1305.Vec.compute_rw",
"Hacl.Spec.Poly1305.Equiv.repeat_blocks_multi_vec_equiv_pre_lemma",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Prims.op_Multiply"
] | [] | false | false | true | false | false | let poly_update_multi_lemma_v #w text acc_v0 r =
| let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
Classical.forall_intro_2 (repeat_blocks_multi_vec_equiv_pre_lemma #w r);
VecLemmas.lemma_repeat_blocks_multi_vec w size_block text f f_v (normalize_n r) acc_v0 | false |
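poly_update_multi_lemma_v (the row above) lifts that per-iteration fact to whole texts: Classical.forall_intro_2 turns the per-block lemma into the repeat_blocks_multi_vec_equiv_pre hypothesis, and VecLemmas.lemma_repeat_blocks_multi_vec then equates the two folds. A hedged plain-Python sketch of the w = 2 statement follows: fold the vectorized step over all blocks, normalize once at the end, and compare against the scalar fold (the helper names are illustrative only, not the F* definitions).

p = (1 << 130) - 5

def update1(r, c, acc):
    return ((acc + c) * r) % p

def normalize2(r, a):
    return (a[0] * r * r + a[1] * r) % p

def update_nblocks2(r2, c, a):
    return [(a[0] * r2 + c[0]) % p, (a[1] * r2 + c[1]) % p]

r = 0xdeadbeef
blocks = [(1 << 128) + i for i in (5, 6, 7, 8)]   # four padded blocks
acc_v = [3, 4]                                    # a 2-lane accumulator
r2 = (r * r) % p

a = acc_v                                         # vectorized fold, two blocks per step
for i in range(0, len(blocks), 2):
    a = update_nblocks2(r2, blocks[i:i + 2], a)
vec_result = normalize2(r, a)

acc = normalize2(r, acc_v)                        # scalar fold from the normalized state
for c in blocks:
    acc = update1(r, c, acc)
assert vec_result == acc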
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.load_acc_lemma1 | val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 36,
"end_line": 37,
"start_col": 0,
"start_line": 30
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
b: Hacl.Spec.Poly1305.Equiv.block_v 1 ->
acc0: Hacl.Spec.Poly1305.Vec.pfelem ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Vec.normalize_n r (Hacl.Spec.Poly1305.Vec.load_acc b acc0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
acc0) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Lib.LoopCombinators.eq_repeati0",
"Spec.Poly1305.felem",
"Prims.unit",
"Lib.LoopCombinators.unfold_repeati",
"Lib.Sequence.lemma_repeat_blocks_multi",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Hacl.Spec.Poly1305.Vec.size_block",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.repeat_blocks_f",
"Prims.int",
"Prims.op_Division",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update1"
] | [] | true | false | true | false | false | let load_acc_lemma1 b acc0 r =
| let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0 | false |
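load_acc_lemma1 (the row above) is the w = 1 base case for the initial load: folding acc0 into the single lane and then normalizing (one extra multiplication by r) is exactly one scalar poly1305_update1 step, so the proof only needs unfold_repeati/eq_repeati0 to unroll repeat_blocks_multi over the single block. A small plain-Python check of that arithmetic; the helpers are stand-ins, not the F* definitions.

p = (1 << 130) - 5

def update1(r, c, acc):
    return ((acc + c) * r) % p

r, acc0 = 0xabcdef, 42
c0 = (1 << 128) + 99          # one padded 16-byte block as a field element

lane0 = (acc0 + c0) % p       # load_acc for w = 1: acc0 folded into the single lane
normalized = (lane0 * r) % p  # normalize_n for w = 1: one multiplication by r
assert normalized == update1(r, c0, acc0)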
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma1 | val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 36,
"end_line": 125,
"start_col": 0,
"start_line": 117
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Hacl.Spec.Poly1305.Vec.pfelem ->
b: Hacl.Spec.Poly1305.Equiv.block_v 1 ->
acc_v0: Hacl.Spec.Poly1305.Vec.elem 1
-> FStar.Pervasives.Lemma
(ensures
(let rw = Hacl.Spec.Poly1305.Vec.compute_rw r in
Hacl.Spec.Poly1305.Vec.normalize_n r
(Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks rw b acc_v0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
(Hacl.Spec.Poly1305.Vec.normalize_n r acc_v0))) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.elem",
"Lib.LoopCombinators.eq_repeati0",
"Spec.Poly1305.felem",
"Prims.unit",
"Lib.LoopCombinators.unfold_repeati",
"Lib.Sequence.lemma_repeat_blocks_multi",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Hacl.Spec.Poly1305.Vec.size_block",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.repeat_blocks_f",
"Prims.int",
"Prims.op_Division",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update1",
"Hacl.Spec.Poly1305.Vec.normalize_n"
] | [] | true | false | true | false | false | let poly_update_nblocks_lemma1 r b acc_v0 =
| let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0 | false |
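For poly_update_nblocks_lemma1 (the row above) the w = 1 identity is essentially definitional: with the precomputed rw being just r, the vectorized step maps the single lane a0 to a0*r + c0 and normalize_n multiplies by r once more, giving (a0*r + c0)*r mod 2^130 - 5, while the scalar side computes poly1305_update1 from the normalized accumulator a0*r, i.e. ((a0*r) + c0)*r, the same value. That is why the proof consists only of unfolding repeat_blocks_multi over the one block via unfold_repeati and eq_repeati0, with no extra simplification lemma.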
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_code_ShiftLeft128_1 | val va_code_ShiftLeft128_1 : va_dummy:unit -> Tot va_code | val va_code_ShiftLeft128_1 : va_dummy:unit -> Tot va_code | let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ())))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 86,
"end_line": 28,
"start_col": 0,
"start_line": 26
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1 | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_Block",
"Vale.PPC64LE.Decls.va_CCons",
"Vale.PPC64LE.InsVector.va_code_Vspltisb",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.PPC64LE.InsVector.va_code_Vsl",
"Vale.PPC64LE.Decls.va_CNil",
"Vale.PPC64LE.Decls.va_code"
] | [] | false | false | false | true | false | let va_code_ShiftLeft128_1 () =
| (va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1)
(va_CCons (va_code_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_CNil ())))) | false |
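The Vale row above (va_code_ShiftLeft128_1) emits two PPC64LE instructions: vspltisb v2, 1 splats the shift amount 1 into every byte of v2, and vsl v1, v1, v2 then shifts the full 128-bit contents of v1 left by that single bit (vsl requires the low three bits of every byte of the shift register to agree, which the splat guarantees). A plain-Python model of the net effect on the 128-bit value, offered as an assumption-level sketch of the instruction semantics rather than the Vale/PPC64LE specification:

def shift_left128_1(v1):
    # one-bit left shift of a 128-bit value; the bit shifted out is dropped
    return (v1 << 1) & ((1 << 128) - 1)

assert shift_left128_1(1) == 2
assert shift_left128_1(1 << 127) == 0    # the top bit falls off the register
assert shift_left128_1(0x3) == 0x6

In Vale.AES.PPC64LE.GF128_Mul this corresponds to the one-bit polynomial shift used by the surrounding GF128 multiplication routines.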
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly_update_multi_lemma | val poly_update_multi_lemma:
#w:lanes
-> text:bytes{w * size_block <= length text /\ length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc0:pfelem
-> r:pfelem -> Lemma
(poly1305_update_multi #w text acc0 r ==
repeat_blocks_multi size_block text (S.poly1305_update1 r size_block) acc0) | val poly_update_multi_lemma:
#w:lanes
-> text:bytes{w * size_block <= length text /\ length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc0:pfelem
-> r:pfelem -> Lemma
(poly1305_update_multi #w text acc0 r ==
repeat_blocks_multi size_block text (S.poly1305_update1 r size_block) acc0) | let poly_update_multi_lemma #w text acc0 r =
let len = length text in
let blocksize_v = w * size_block in
let text0 = Seq.slice text 0 blocksize_v in
let text1 = Seq.slice text blocksize_v len in
FStar.Math.Lemmas.modulo_addition_lemma len blocksize_v (- 1);
assert (length text1 % (w * size_block) = 0 /\ length text1 % size_block = 0);
let f = S.poly1305_update1 r size_block in
let acc_v0 = load_acc #w text0 acc0 in
let rp = poly1305_update_multi #w text acc0 r in
poly_update_multi_lemma_v #w text1 acc_v0 r;
load_acc_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_multi_split size_block blocksize_v text f acc0 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 72,
"end_line": 251,
"start_col": 0,
"start_line": 237
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r
val poly_update_nblocks_lemma4: r:pfelem -> b:block_v 4 -> acc_v0:elem 4 -> Lemma
(let rw = compute_rw #4 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma4 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
let r2 = pfmul r r in
let r4 = pfmul r2 r2 in
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_repeat_blocks_multi_lemma4_simplify
acc_v0.[0] acc_v0.[1] acc_v0.[2] acc_v0.[3] c0 c1 c2 c3 r r2 r4
val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma #w r b acc_v0 =
match w with
| 1 -> poly_update_nblocks_lemma1 r b acc_v0
| 2 -> poly_update_nblocks_lemma2 r b acc_v0
| 4 -> poly_update_nblocks_lemma4 r b acc_v0
val repeat_blocks_multi_vec_equiv_pre_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
VecLemmas.repeat_blocks_multi_vec_equiv_pre w size_block f f_v (normalize_n r) b acc_v0)
let repeat_blocks_multi_vec_equiv_pre_lemma #w r b acc_v0 =
poly_update_nblocks_lemma #w r b acc_v0
val poly_update_multi_lemma_v:
#w:lanes
-> text:bytes{length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc_v0:elem w
-> r:pfelem -> Lemma
(let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
normalize_n r (repeat_blocks_multi (w * size_block) text f_v acc_v0) ==
repeat_blocks_multi size_block text f (normalize_n r acc_v0))
let poly_update_multi_lemma_v #w text acc_v0 r =
let rw = compute_rw #w r in
let f = S.poly1305_update1 r size_block in
let f_v = poly1305_update_nblocks rw in
Classical.forall_intro_2 (repeat_blocks_multi_vec_equiv_pre_lemma #w r);
VecLemmas.lemma_repeat_blocks_multi_vec w size_block text f f_v (normalize_n r) acc_v0
val poly_update_multi_lemma:
#w:lanes
-> text:bytes{w * size_block <= length text /\ length text % (w * size_block) = 0 /\ length text % size_block = 0}
-> acc0:pfelem
-> r:pfelem -> Lemma
(poly1305_update_multi #w text acc0 r ==
repeat_blocks_multi size_block text (S.poly1305_update1 r size_block) acc0) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
text:
Lib.ByteSequence.bytes
{ w * Hacl.Spec.Poly1305.Vec.size_block <= Lib.Sequence.length text /\
Lib.Sequence.length text % (w * Hacl.Spec.Poly1305.Vec.size_block) = 0 /\
Lib.Sequence.length text % Hacl.Spec.Poly1305.Vec.size_block = 0 } ->
acc0: Hacl.Spec.Poly1305.Vec.pfelem ->
r: Hacl.Spec.Poly1305.Vec.pfelem
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Vec.poly1305_update_multi text acc0 r ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
text
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
acc0) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.lanes",
"Lib.ByteSequence.bytes",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Hacl.Spec.Poly1305.Vec.size_block",
"Lib.Sequence.length",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Lib.Sequence.Lemmas.repeat_blocks_multi_split",
"Spec.Poly1305.felem",
"Prims.unit",
"Hacl.Spec.Poly1305.Equiv.load_acc_lemma",
"Hacl.Spec.Poly1305.Equiv.poly_update_multi_lemma_v",
"Hacl.Spec.Poly1305.Vec.poly1305_update_multi",
"Hacl.Spec.Poly1305.Vec.elem",
"Hacl.Spec.Poly1305.Vec.load_acc",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update1",
"Prims._assert",
"FStar.Math.Lemmas.modulo_addition_lemma",
"Prims.op_Minus",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.slice",
"Prims.nat"
] | [] | true | false | true | false | false | let poly_update_multi_lemma #w text acc0 r =
| let len = length text in
let blocksize_v = w * size_block in
let text0 = Seq.slice text 0 blocksize_v in
let text1 = Seq.slice text blocksize_v len in
FStar.Math.Lemmas.modulo_addition_lemma len blocksize_v (- 1);
assert (length text1 % (w * size_block) = 0 /\ length text1 % size_block = 0);
let f = S.poly1305_update1 r size_block in
let acc_v0 = load_acc #w text0 acc0 in
let rp = poly1305_update_multi #w text acc0 r in
poly_update_multi_lemma_v #w text1 acc_v0 r;
load_acc_lemma #w text0 acc0 r;
SeqLemmas.repeat_blocks_multi_split size_block blocksize_v text f acc0 | false |
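poly_update_multi_lemma (the row above) glues the pieces together: the text is split after the first w * size_block bytes, load_acc_lemma handles the head, poly_update_multi_lemma_v handles the tail, and SeqLemmas.repeat_blocks_multi_split rejoins the two scalar folds. The splitting step itself is the ordinary fold-splitting fact, sketched below in plain Python on padded blocks (helper names are illustrative, not the Lib.Sequence API).

p = (1 << 130) - 5

def update1(r, c, acc):
    return ((acc + c) * r) % p

def fold(r, blocks, acc):                     # stand-in for repeat_blocks_multi
    for c in blocks:
        acc = update1(r, c, acc)
    return acc

r, acc0 = 0x31415926, 7
blocks = [(1 << 128) + i for i in range(6)]   # six padded blocks; head = first w = 2

head, tail = blocks[:2], blocks[2:]
assert fold(r, blocks, acc0) == fold(r, tail, fold(r, head, acc0))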
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_codegen_success_ShiftLeft128_1 | val va_codegen_success_ShiftLeft128_1 : va_dummy:unit -> Tot va_pbool | val va_codegen_success_ShiftLeft128_1 : va_dummy:unit -> Tot va_pbool | let va_codegen_success_ShiftLeft128_1 () =
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1) (va_pbool_and
(va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_ttrue ()))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 19,
"end_line": 34,
"start_col": 0,
"start_line": 31
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ())))) | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_pbool | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_pbool_and",
"Vale.PPC64LE.InsVector.va_codegen_success_Vspltisb",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.PPC64LE.InsVector.va_codegen_success_Vsl",
"Vale.PPC64LE.Decls.va_ttrue",
"Vale.PPC64LE.Decls.va_pbool"
] | [] | false | false | false | true | false | let va_codegen_success_ShiftLeft128_1 () =
| (va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1)
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2))
(va_ttrue ()))) | false |
Hacl.Spec.Poly1305.Equiv.fst | Hacl.Spec.Poly1305.Equiv.poly_update_nblocks_lemma2 | val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | let poly_update_nblocks_lemma2 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[0] acc_v0.[1] c0 c1 r | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Equiv.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 86,
"end_line": 149,
"start_col": 0,
"start_line": 133
} | module Hacl.Spec.Poly1305.Equiv
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.IntVector
module Loops = Lib.LoopCombinators
module VecLemmas = Lib.Vec.Lemmas
module SeqLemmas = Lib.Sequence.Lemmas
module Lemmas = Hacl.Spec.Poly1305.Lemmas
module S = Spec.Poly1305
include Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0"
let block_v (w:lanes{w * size_block <= max_size_t}) = lbytes (w * size_block)
///
/// val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
/// (normalize_n r (load_acc b acc0) == repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
///
val load_acc_lemma1: b:block_v 1 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc #1 b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma1 b acc0 r =
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val load_acc_lemma2: b:block_v 2 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma2 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1
val load_acc_lemma4: b:block_v 4 -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma4 b acc0 r =
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let b2 = Seq.slice b (2 * size_block) (3 * size_block) in
let b3 = Seq.slice b (3 * size_block) (4 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let c2 = pfadd (pow2 128) (nat_from_bytes_le b2) in
let c3 = pfadd (pow2 128) (nat_from_bytes_le b3) in
FStar.Math.Lemmas.modulo_lemma c1 prime;
FStar.Math.Lemmas.modulo_lemma c2 prime;
FStar.Math.Lemmas.modulo_lemma c3 prime;
let f = S.poly1305_update1 r size_block in
let nb = (4 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 3;
Loops.unfold_repeati nb repeat_f acc0 2;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load4_simplify acc0 r c0 c1 c2 c3
val load_acc_lemma: #w:lanes -> b:block_v w -> acc0:pfelem -> r:pfelem -> Lemma
(normalize_n r (load_acc b acc0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) acc0)
let load_acc_lemma #w b acc0 r =
match w with
| 1 -> load_acc_lemma1 b acc0 r
| 2 -> load_acc_lemma2 b acc0 r
| 4 -> load_acc_lemma4 b acc0 r
///
/// val poly_update_nblocks_lemma: #w:lanes -> r:pfelem -> b:block_v w -> acc_v0:elem w -> Lemma
/// (let rw = compute_rw r in
/// normalize_n r (poly_update_nblocks #w rw b acc_v0) ==
/// repeat_blocks_multi size_block b (poly_update1 r) (normalize_n r acc_v0))
///
val poly_update_nblocks_lemma1: r:pfelem -> b:block_v 1 -> acc_v0:elem 1 -> Lemma
(let rw = compute_rw #1 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0))
let poly_update_nblocks_lemma1 r b acc_v0 =
let acc0 = normalize_n r acc_v0 in
let f = S.poly1305_update1 r size_block in
let nb = size_block / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0
val poly_update_nblocks_lemma2: r:pfelem -> b:block_v 2 -> acc_v0:elem 2 -> Lemma
(let rw = compute_rw #2 r in
normalize_n r (poly1305_update_nblocks rw b acc_v0) ==
repeat_blocks_multi size_block b (S.poly1305_update1 r size_block) (normalize_n r acc_v0)) | {
"checked_file": "/",
"dependencies": [
"Spec.Poly1305.fst.checked",
"prims.fst.checked",
"Lib.Vec.Lemmas.fsti.checked",
"Lib.Sequence.Lemmas.fsti.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Lemmas.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Equiv.fst"
} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Poly1305",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Lemmas",
"short_module": "Lemmas"
},
{
"abbrev": true,
"full_module": "Lib.Sequence.Lemmas",
"short_module": "SeqLemmas"
},
{
"abbrev": true,
"full_module": "Lib.Vec.Lemmas",
"short_module": "VecLemmas"
},
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Hacl.Spec.Poly1305.Vec.pfelem ->
b: Hacl.Spec.Poly1305.Equiv.block_v 2 ->
acc_v0: Hacl.Spec.Poly1305.Vec.elem 2
-> FStar.Pervasives.Lemma
(ensures
(let rw = Hacl.Spec.Poly1305.Vec.compute_rw r in
Hacl.Spec.Poly1305.Vec.normalize_n r
(Hacl.Spec.Poly1305.Vec.poly1305_update_nblocks rw b acc_v0) ==
Lib.Sequence.repeat_blocks_multi Hacl.Spec.Poly1305.Vec.size_block
b
(Spec.Poly1305.poly1305_update1 r Hacl.Spec.Poly1305.Vec.size_block)
(Hacl.Spec.Poly1305.Vec.normalize_n r acc_v0))) | FStar.Pervasives.Lemma | [
"lemma"
] | [] | [
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Equiv.block_v",
"Hacl.Spec.Poly1305.Vec.elem",
"Hacl.Spec.Poly1305.Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify",
"Lib.Sequence.op_String_Access",
"Prims.unit",
"Hacl.Spec.Poly1305.Lemmas.poly_update_multi_lemma_load2_simplify",
"Lib.LoopCombinators.eq_repeati0",
"Spec.Poly1305.felem",
"Lib.LoopCombinators.unfold_repeati",
"Lib.Sequence.lemma_repeat_blocks_multi",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Hacl.Spec.Poly1305.Vec.size_block",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.repeat_blocks_f",
"Prims.int",
"Prims.op_Division",
"FStar.Mul.op_Star",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Spec.Poly1305.poly1305_update1",
"Hacl.Spec.Poly1305.Vec.pfadd",
"Prims.pow2",
"Lib.ByteSequence.nat_from_bytes_le",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.slice",
"Hacl.Spec.Poly1305.Vec.normalize_n"
] | [] | true | false | true | false | false | let poly_update_nblocks_lemma2 r b acc_v0 =
| let acc0 = normalize_n r acc_v0 in
let b0 = Seq.slice b 0 size_block in
let b1 = Seq.slice b size_block (2 * size_block) in
let c0 = pfadd (pow2 128) (nat_from_bytes_le b0) in
let c1 = pfadd (pow2 128) (nat_from_bytes_le b1) in
let f = S.poly1305_update1 r size_block in
let nb = (2 * size_block) / size_block in
let repeat_f = repeat_blocks_f size_block b f nb in
lemma_repeat_blocks_multi size_block b f acc0;
Loops.unfold_repeati nb repeat_f acc0 1;
Loops.unfold_repeati nb repeat_f acc0 0;
Loops.eq_repeati0 nb repeat_f acc0;
Lemmas.poly_update_multi_lemma_load2_simplify acc0 r c0 c1;
Lemmas.poly_update_repeat_blocks_multi_lemma2_simplify acc_v0.[ 0 ] acc_v0.[ 1 ] c0 c1 r | false |
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_codegen_success_ShiftLeft2_128_1 | val va_codegen_success_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_pbool | val va_codegen_success_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_pbool | let va_codegen_success_ShiftLeft2_128_1 () =
(va_pbool_and (va_codegen_success_Vspltisw (va_op_vec_opr_vec 0) 0) (va_pbool_and
(va_codegen_success_LoadImm64 (va_op_reg_opr_reg 10) 31) (va_pbool_and
(va_codegen_success_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10)) (va_pbool_and
(va_codegen_success_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_pbool_and (va_codegen_success_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0)
(va_op_vec_opr_vec 3) 4) (va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 0) 1)
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vxor
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_ttrue ())))))))))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 93,
"end_line": 100,
"start_col": 0,
"start_line": 90
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ()))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft128_1 () =
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1) (va_pbool_and
(va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_ttrue ())))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft128_1 (va_mods:va_mods_t) (a:poly) : (va_quickCode unit
(va_code_ShiftLeft128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 60 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 2) 1) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 61 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (fun
(va_s:va_state) _ -> let (va_arg5:Vale.Math.Poly2_s.poly) = a in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 63 column 23 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_left_1 va_arg5) (va_QEmpty (()))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft128_1 va_b0 va_s0 a =
let (va_mods:va_mods_t) = [va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft128_1 va_mods a in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 49 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 58 column 37 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.shift a 1))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft128_1 a va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft128_1 (va_code_ShiftLeft128_1 ()) va_s0 a in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
//--
//-- ShiftLeft2_128_1
val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft2_128_1 () =
(va_Block (va_CCons (va_code_Vspltisw (va_op_vec_opr_vec 0) 0) (va_CCons (va_code_LoadImm64
(va_op_reg_opr_reg 10) 31) (va_CCons (va_code_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg
10)) (va_CCons (va_code_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_CCons (va_code_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4)
(va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 0) 1) (va_CCons (va_code_Vsl (va_op_vec_opr_vec
1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_CNil ())))))))))))
val va_codegen_success_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_pbool | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_pbool | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_pbool_and",
"Vale.PPC64LE.InsVector.va_codegen_success_Vspltisw",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.PPC64LE.InsBasic.va_codegen_success_LoadImm64",
"Vale.PPC64LE.Decls.va_op_reg_opr_reg",
"Vale.PPC64LE.InsVector.va_codegen_success_Mtvsrws",
"Vale.PPC64LE.InsVector.va_codegen_success_Vsrw",
"Vale.PPC64LE.InsVector.va_codegen_success_Vsldoi",
"Vale.PPC64LE.InsVector.va_codegen_success_Vspltisb",
"Vale.PPC64LE.InsVector.va_codegen_success_Vsl",
"Vale.PPC64LE.InsVector.va_codegen_success_Vxor",
"Vale.PPC64LE.Decls.va_ttrue",
"Vale.PPC64LE.Decls.va_pbool"
] | [] | false | false | false | true | false | let va_codegen_success_ShiftLeft2_128_1 () =
| (va_pbool_and (va_codegen_success_Vspltisw (va_op_vec_opr_vec 0) 0)
(va_pbool_and (va_codegen_success_LoadImm64 (va_op_reg_opr_reg 10) 31)
(va_pbool_and (va_codegen_success_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10))
(va_pbool_and (va_codegen_success_Vsrw (va_op_vec_opr_vec 3)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 3))
(va_pbool_and (va_codegen_success_Vsldoi (va_op_vec_opr_vec 3)
(va_op_vec_opr_vec 0)
(va_op_vec_opr_vec 3)
4)
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 0) 1)
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 0))
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 0))
(va_pbool_and (va_codegen_success_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 3))
(va_ttrue ())))))))))) | false |
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_code_ShiftLeft2_128_1 | val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code | val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code | let va_code_ShiftLeft2_128_1 () =
(va_Block (va_CCons (va_code_Vspltisw (va_op_vec_opr_vec 0) 0) (va_CCons (va_code_LoadImm64
(va_op_reg_opr_reg 10) 31) (va_CCons (va_code_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg
10)) (va_CCons (va_code_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_CCons (va_code_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4)
(va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 0) 1) (va_CCons (va_code_Vsl (va_op_vec_opr_vec
1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_CNil ()))))))))))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 71,
"end_line": 86,
"start_col": 0,
"start_line": 78
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ()))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft128_1 () =
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1) (va_pbool_and
(va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_ttrue ())))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft128_1 (va_mods:va_mods_t) (a:poly) : (va_quickCode unit
(va_code_ShiftLeft128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 60 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 2) 1) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 61 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (fun
(va_s:va_state) _ -> let (va_arg5:Vale.Math.Poly2_s.poly) = a in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 63 column 23 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_left_1 va_arg5) (va_QEmpty (()))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft128_1 va_b0 va_s0 a =
let (va_mods:va_mods_t) = [va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft128_1 va_mods a in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 49 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 58 column 37 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.shift a 1))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft128_1 a va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft128_1 (va_code_ShiftLeft128_1 ()) va_s0 a in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
//--
//-- ShiftLeft2_128_1
val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_Block",
"Vale.PPC64LE.Decls.va_CCons",
"Vale.PPC64LE.InsVector.va_code_Vspltisw",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.PPC64LE.InsBasic.va_code_LoadImm64",
"Vale.PPC64LE.Decls.va_op_reg_opr_reg",
"Vale.PPC64LE.InsVector.va_code_Mtvsrws",
"Vale.PPC64LE.InsVector.va_code_Vsrw",
"Vale.PPC64LE.InsVector.va_code_Vsldoi",
"Vale.PPC64LE.InsVector.va_code_Vspltisb",
"Vale.PPC64LE.InsVector.va_code_Vsl",
"Vale.PPC64LE.InsVector.va_code_Vxor",
"Vale.PPC64LE.Decls.va_CNil",
"Vale.PPC64LE.Decls.va_code"
] | [] | false | false | false | true | false | let va_code_ShiftLeft2_128_1 () =
| (va_Block (va_CCons (va_code_Vspltisw (va_op_vec_opr_vec 0) 0)
(va_CCons (va_code_LoadImm64 (va_op_reg_opr_reg 10) 31)
(va_CCons (va_code_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10))
(va_CCons (va_code_Vsrw (va_op_vec_opr_vec 3)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 3))
(va_CCons (va_code_Vsldoi (va_op_vec_opr_vec 3)
(va_op_vec_opr_vec 0)
(va_op_vec_opr_vec 3)
4)
(va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 0) 1)
(va_CCons (va_code_Vsl (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 0))
(va_CCons (va_code_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 0))
(va_CCons (va_code_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 3))
(va_CNil ()))))))))))) | false |
Steel.Channel.Simplex.fst | Steel.Channel.Simplex.prot_equals | val prot_equals (#q #p: _) (#vr: chan_val) (cc: chan q)
: Steel unit
((pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(fun _ -> (pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(requires fun _ -> True)
(ensures fun _ _ _ -> step vr.chan_prot vr.chan_msg == p) | val prot_equals (#q #p: _) (#vr: chan_val) (cc: chan q)
: Steel unit
((pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(fun _ -> (pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(requires fun _ -> True)
(ensures fun _ _ _ -> step vr.chan_prot vr.chan_msg == p) | let prot_equals #q (#p:_) (#vr:chan_val) (cc:chan q)
: Steel unit
(pts_to cc.chan_chan.recv half vr `star` receiver cc p)
(fun _ -> pts_to cc.chan_chan.recv half vr `star` receiver cc p)
(requires fun _ -> True)
(ensures fun _ _ _ -> step vr.chan_prot vr.chan_msg == p)
= let vr' = witness_exists () in
H.higher_ref_pts_to_injective_eq #_ #_ #_ #_ #vr #_ cc.chan_chan.recv;
rewrite_slprop (in_state_slprop _ _) (in_state_slprop p vr) (fun _ -> ());
elim_pure _;
intro_in_state _ _ vr | {
"file_name": "lib/steel/Steel.Channel.Simplex.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 25,
"end_line": 439,
"start_col": 0,
"start_line": 429
} | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Channel.Simplex
module P = Steel.Channel.Protocol
open Steel.SpinLock
open Steel.Memory
open Steel.Effect.Atomic
open Steel.Effect
open Steel.HigherReference
open Steel.FractionalPermission
module MRef = Steel.MonotonicHigherReference
module H = Steel.HigherReference
let sprot = p:prot { more p }
noeq
type chan_val = {
chan_prot : sprot;
chan_msg : msg_t chan_prot;
chan_ctr : nat
}
let mref a p = MRef.ref a p
let trace_ref (p:prot) = mref (partial_trace_of p) extended_to
noeq
type chan_t (p:prot) = {
send: ref chan_val;
recv: ref chan_val;
trace: trace_ref p;
}
let half : perm = half_perm full_perm
let step (s:sprot) (x:msg_t s) = step s x
let chan_inv_step_p (vrecv vsend:chan_val) : prop =
(vsend.chan_prot == step vrecv.chan_prot vrecv.chan_msg /\
vsend.chan_ctr == vrecv.chan_ctr + 1)
let chan_inv_step (vrecv vsend:chan_val) : vprop =
pure (chan_inv_step_p vrecv vsend)
let chan_inv_cond (vsend:chan_val) (vrecv:chan_val) : vprop =
if vsend.chan_ctr = vrecv.chan_ctr
then pure (vsend == vrecv)
else chan_inv_step vrecv vsend
let trace_until_prop #p (r:trace_ref p) (vr:chan_val) (tr: partial_trace_of p) : vprop =
MRef.pts_to r full_perm tr `star`
pure (until tr == step vr.chan_prot vr.chan_msg)
let trace_until #p (r:trace_ref p) (vr:chan_val) =
h_exists (trace_until_prop r vr)
let chan_inv_recv #p (c:chan_t p) (vsend:chan_val) =
h_exists (fun (vrecv:chan_val) ->
pts_to c.recv half vrecv `star`
trace_until c.trace vrecv `star`
chan_inv_cond vsend vrecv)
let chan_inv #p (c:chan_t p) : vprop =
h_exists (fun (vsend:chan_val) ->
pts_to c.send half vsend `star` chan_inv_recv c vsend)
let intro_chan_inv_cond_eqT (vs vr:chan_val)
: Steel unit emp
(fun _ -> chan_inv_cond vs vr)
(requires fun _ -> vs == vr)
(ensures fun _ _ _ -> True)
= intro_pure (vs == vs);
rewrite_slprop (chan_inv_cond vs vs) (chan_inv_cond vs vr) (fun _ -> ())
let intro_chan_inv_cond_stepT (vs vr:chan_val)
: SteelT unit (chan_inv_step vr vs)
(fun _ -> chan_inv_cond vs vr)
= Steel.Utils.extract_pure (chan_inv_step_p vr vs);
rewrite_slprop (chan_inv_step vr vs) (chan_inv_cond vs vr) (fun _ -> ())
let intro_chan_inv_auxT #p (#vs : chan_val)
(#vr : chan_val)
(c:chan_t p)
: SteelT unit (pts_to c.send half vs `star`
pts_to c.recv half vr `star`
trace_until c.trace vr `star`
chan_inv_cond vs vr)
(fun _ -> chan_inv c)
= intro_exists _ (fun (vr:chan_val) -> pts_to c.recv half vr `star` trace_until c.trace vr `star` chan_inv_cond vs vr);
intro_exists _ (fun (vs:chan_val) -> pts_to c.send half vs `star` chan_inv_recv c vs)
let intro_chan_inv_stepT #p (c:chan_t p) (vs vr:chan_val)
: SteelT unit (pts_to c.send half vs `star`
pts_to c.recv half vr `star`
trace_until c.trace vr `star`
chan_inv_step vr vs)
(fun _ -> chan_inv c)
= intro_chan_inv_cond_stepT vs vr;
intro_chan_inv_auxT c
let intro_chan_inv_eqT #p (c:chan_t p) (vs vr:chan_val)
: Steel unit (pts_to c.send half vs `star`
pts_to c.recv half vr `star`
trace_until c.trace vr)
(fun _ -> chan_inv c)
(requires fun _ -> vs == vr)
(ensures fun _ _ _ -> True)
= intro_chan_inv_cond_eqT vs vr;
intro_chan_inv_auxT c
noeq
type chan p = {
chan_chan : chan_t p;
chan_lock : lock (chan_inv chan_chan)
}
let in_state_prop (p:prot) (vsend:chan_val) : prop =
p == step vsend.chan_prot vsend.chan_msg
irreducible
let next_chan_val (#p:sprot) (x:msg_t p) (vs0:chan_val { in_state_prop p vs0 })
: Tot (vs:chan_val{in_state_prop (step p x) vs /\ chan_inv_step_p vs0 vs})
= {
chan_prot = (step vs0.chan_prot vs0.chan_msg);
chan_msg = x;
chan_ctr = vs0.chan_ctr + 1
}
[@@__reduce__]
let in_state_slprop (p:prot) (vsend:chan_val) : vprop = pure (in_state_prop p vsend)
let in_state (r:ref chan_val) (p:prot) =
h_exists (fun (vsend:chan_val) ->
pts_to r half vsend `star` in_state_slprop p vsend)
let sender #q (c:chan q) (p:prot) = in_state c.chan_chan.send p
let receiver #q (c:chan q) (p:prot) = in_state c.chan_chan.recv p
let intro_chan_inv #p (c:chan_t p) (v:chan_val)
: SteelT unit (pts_to c.send half v `star`
pts_to c.recv half v `star`
trace_until c.trace v)
(fun _ -> chan_inv c)
= intro_chan_inv_eqT c v v
let chan_val_p (p:prot) = (vs0:chan_val { in_state_prop p vs0 })
let intro_in_state (r:ref chan_val) (p:prot) (v:chan_val_p p)
: SteelT unit (pts_to r half v) (fun _ -> in_state r p)
= intro_pure (in_state_prop p v);
intro_exists v (fun (v:chan_val) -> pts_to r half v `star` in_state_slprop p v)
let msg t p = Msg Send unit (fun _ -> p)
let init_chan_val (p:prot) = v:chan_val {v.chan_prot == msg unit p}
let initial_trace (p:prot) : (q:partial_trace_of p {until q == p})
= { to = p; tr=Waiting p}
let intro_trace_until #q (r:trace_ref q) (tr:partial_trace_of q) (v:chan_val)
: Steel unit (MRef.pts_to r full_perm tr)
(fun _ -> trace_until r v)
(requires fun _ -> until tr == step v.chan_prot v.chan_msg)
(ensures fun _ _ _ -> True)
= intro_pure (until tr == step v.chan_prot v.chan_msg);
intro_exists tr
(fun (tr:partial_trace_of q) ->
MRef.pts_to r full_perm tr `star`
pure (until tr == (step v.chan_prot v.chan_msg)));
()
let chan_t_sr (p:prot) (send recv:ref chan_val) = (c:chan_t p{c.send == send /\ c.recv == recv})
let intro_trace_until_init #p (c:chan_t p) (v:init_chan_val p)
: SteelT unit (MRef.pts_to c.trace full_perm (initial_trace p))
(fun _ -> trace_until c.trace v)
= intro_pure (until (initial_trace p) == step v.chan_prot v.chan_msg);
//TODO: Not sure why I need this rewrite
rewrite_slprop (MRef.pts_to c.trace full_perm (initial_trace p) `star`
pure (until (initial_trace p) == step v.chan_prot v.chan_msg))
(MRef.pts_to c.trace full_perm (initial_trace p) `star`
pure (until (initial_trace p) == step v.chan_prot v.chan_msg))
(fun _ -> ());
intro_exists (initial_trace p) (trace_until_prop c.trace v)
let mk_chan (#p:prot) (send recv:ref chan_val) (v:init_chan_val p)
: SteelT (chan_t_sr p send recv)
(pts_to send half v `star` pts_to recv half v)
(fun c -> chan_inv c)
= let tr: trace_ref p = MRef.alloc (extended_to #p) (initial_trace p) in
let c = Mkchan_t send recv tr in
rewrite_slprop
(MRef.pts_to tr full_perm (initial_trace p))
(MRef.pts_to c.trace full_perm (initial_trace p)) (fun _ -> ());
intro_trace_until_init c v;
rewrite_slprop
(pts_to send half v `star` pts_to recv half v)
(pts_to c.send half v `star` pts_to c.recv half v)
(fun _ -> ());
intro_chan_inv #p c v;
let c' : chan_t_sr p send recv = c in
rewrite_slprop (chan_inv c) (chan_inv c') (fun _ -> ());
return c'
let new_chan (p:prot) : SteelT (chan p) emp (fun c -> sender c p `star` receiver c p)
= let q = msg unit p in
let v : chan_val = { chan_prot = q; chan_msg = (); chan_ctr = 0 } in
let vp : init_chan_val p = v in
let send = H.alloc v in
let recv = H.alloc v in
H.share recv;
H.share send;
(* TODO: use smt_fallback *)
rewrite_slprop (pts_to send (half_perm full_perm) v `star`
pts_to send (half_perm full_perm) v `star`
pts_to recv (half_perm full_perm) v `star`
pts_to recv (half_perm full_perm) v)
(pts_to send half vp `star`
pts_to send half vp `star`
pts_to recv half vp `star`
pts_to recv half vp)
(fun _ -> ());
let c = mk_chan send recv vp in
intro_in_state send p vp;
intro_in_state recv p vp;
let l = Steel.SpinLock.new_lock (chan_inv c) in
let ch = { chan_chan = c; chan_lock = l } in
rewrite_slprop (in_state send p) (sender ch p) (fun _ -> ());
rewrite_slprop (in_state recv p) (receiver ch p) (fun _ -> ());
return ch
[@@__reduce__]
let send_recv_in_sync (r:ref chan_val) (p:prot{more p}) #q (c:chan_t q) (vs vr:chan_val) : vprop =
(pts_to c.send half vs `star`
pts_to c.recv half vr `star`
trace_until c.trace vr `star`
pure (vs == vr) `star`
in_state r p)
[@@__reduce__]
let sender_ahead (r:ref chan_val) (p:prot{more p}) #q (c:chan_t q) (vs vr:chan_val) : vprop =
(pts_to c.send half vs `star`
pts_to c.recv half vr `star`
trace_until c.trace vr `star`
chan_inv_step vr vs `star`
in_state r p)
let update_channel (#p:sprot) #q (c:chan_t q) (x:msg_t p) (vs:chan_val) (r:ref chan_val)
: SteelT chan_val
(pts_to r full_perm vs `star` in_state_slprop p vs)
(fun vs' -> pts_to r full_perm vs' `star` (in_state_slprop (step p x) vs' `star` chan_inv_step vs vs'))
= elim_pure (in_state_prop p vs);
let vs' = next_chan_val x vs in
H.write r vs';
intro_pure (in_state_prop (step p x) vs');
intro_pure (chan_inv_step_p vs vs');
return vs'
[@@__reduce__]
let send_pre_available (p:sprot) #q (c:chan_t q) (vs vr:chan_val) = send_recv_in_sync c.send p c vs vr
let gather_r (#p:sprot) (r:ref chan_val) (v:chan_val)
: SteelT unit
(pts_to r half v `star` in_state r p)
(fun _ -> pts_to r full_perm v `star` in_state_slprop p v)
= let v' = witness_exists () in
H.higher_ref_pts_to_injective_eq #_ #_ #_ #_ #v #_ r;
H.gather #_ #_ #half #half #v #v r;
rewrite_slprop (pts_to r (sum_perm half half) v) (pts_to r full_perm v) (fun _ -> ());
rewrite_slprop (in_state_slprop p v') (in_state_slprop p v) (fun _ -> ())
let send_available (#p:sprot) #q (cc:chan q) (x:msg_t p) (vs vr:chan_val) (_:unit)
: SteelT unit (send_pre_available p #q cc.chan_chan vs vr) (fun _ -> sender cc (step p x))
= Steel.Utils.extract_pure (vs == vr);
Steel.Utils.rewrite #_ #(send_recv_in_sync cc.chan_chan.send p cc.chan_chan vs) vr vs;
elim_pure (vs == vs);
gather_r cc.chan_chan.send vs;
let next_vs = update_channel cc.chan_chan x vs cc.chan_chan.send in
H.share cc.chan_chan.send;
intro_exists next_vs (fun (next_vs:chan_val) -> pts_to cc.chan_chan.send half next_vs `star` in_state_slprop (step p x) next_vs);
intro_chan_inv_stepT cc.chan_chan next_vs vs;
Steel.SpinLock.release cc.chan_lock
let extensible (#p:prot) (x:partial_trace_of p) = P.more x.to
let next_msg_t (#p:prot) (x:partial_trace_of p) = P.next_msg_t x.to
let next_trace #p (vr:chan_val) (vs:chan_val)
(tr:partial_trace_of p)
(s:squash (until tr == step vr.chan_prot vr.chan_msg))
(_:squash (chan_inv_step_p vr vs))
: (ts:partial_trace_of p { until ts == step vs.chan_prot vs.chan_msg })
= let msg : next_msg_t tr = vs.chan_msg in
assert (extensible tr);
extend_partial_trace tr msg
let next_trace_st #p (vr:chan_val) (vs:chan_val) (tr:partial_trace_of p)
: Steel (extension_of tr)
(chan_inv_step vr vs)
(fun _ -> emp)
(requires fun _ -> until tr == step vr.chan_prot vr.chan_msg)
(ensures fun _ ts _ -> until ts == step vs.chan_prot vs.chan_msg)
= elim_pure (chan_inv_step_p vr vs);
let ts : extension_of tr = next_trace vr vs tr () () in
return ts
let update_trace #p (r:trace_ref p) (vr:chan_val) (vs:chan_val)
: Steel unit
(trace_until r vr)
(fun _ -> trace_until r vs)
(requires fun _ -> chan_inv_step_p vr vs)
(ensures fun _ _ _ -> True)
= intro_pure (chan_inv_step_p vr vs);
let tr = MRef.read_refine r in
elim_pure (until tr == step vr.chan_prot vr.chan_msg);
let ts : extension_of tr = next_trace_st vr vs tr in
MRef.write r ts;
intro_pure (until ts == step vs.chan_prot vs.chan_msg);
intro_exists ts
(fun (ts:partial_trace_of p) ->
MRef.pts_to r full_perm ts `star`
pure (until ts == step vs.chan_prot vs.chan_msg))
let recv_availableT (#p:sprot) #q (cc:chan q) (vs vr:chan_val) (_:unit)
: SteelT (msg_t p)
(sender_ahead cc.chan_chan.recv p cc.chan_chan vs vr)
(fun x -> receiver cc (step p x))
= elim_pure (chan_inv_step_p vr vs);
gather_r cc.chan_chan.recv vr;
elim_pure (in_state_prop p vr);
H.write cc.chan_chan.recv vs;
H.share cc.chan_chan.recv;
assert (vs.chan_prot == p);
let vs_msg : msg_t p = vs.chan_msg in
intro_pure (in_state_prop (step p vs_msg) vs);
intro_exists vs (fun (vs:chan_val) -> pts_to cc.chan_chan.recv half vs `star` in_state_slprop (step p vs_msg) vs);
update_trace cc.chan_chan.trace vr vs;
intro_chan_inv cc.chan_chan vs;
Steel.SpinLock.release cc.chan_lock;
vs_msg
#push-options "--ide_id_info_off"
let send_receive_prelude (#p:prot) (cc:chan p)
: SteelT (chan_val & chan_val)
emp
(fun v ->
pts_to cc.chan_chan.send half (fst v) `star`
pts_to cc.chan_chan.recv half (snd v) `star`
trace_until cc.chan_chan.trace (snd v) `star`
chan_inv_cond (fst v) (snd v))
= let c = cc.chan_chan in
Steel.SpinLock.acquire cc.chan_lock;
let vs = read_refine (chan_inv_recv cc.chan_chan) cc.chan_chan.send in
let _ = witness_exists () in
let vr = H.read cc.chan_chan.recv in
rewrite_slprop (trace_until _ _ `star` chan_inv_cond _ _)
(trace_until cc.chan_chan.trace vr `star` chan_inv_cond vs vr)
(fun _ -> ());
return (vs, vr)
let rec send (#p:prot) (c:chan p) (#next:prot{more next}) (x:msg_t next)
: SteelT unit (sender c next) (fun _ -> sender c (step next x))
= let v = send_receive_prelude c in //matching v as vs,vr fails
if (fst v).chan_ctr = (snd v).chan_ctr
then (
rewrite_slprop (chan_inv_cond (fst v) (snd v))
(pure (fst v == snd v))
(fun _ -> ());
send_available c x (fst v) (snd v) () //TODO: inlining send_availableT here fails
)
else (
rewrite_slprop (chan_inv_cond (fst v) (snd v))
(chan_inv_step (snd v) (fst v))
(fun _ -> ());
intro_chan_inv_stepT c.chan_chan (fst v) (snd v);
Steel.SpinLock.release c.chan_lock;
send c x
)
let rec recv (#p:prot) (#next:prot{more next}) (c:chan p)
: SteelT (msg_t next) (receiver c next) (fun x -> receiver c (step next x))
= let v = send_receive_prelude c in
if (fst v).chan_ctr = (snd v).chan_ctr
then (
rewrite_slprop (chan_inv_cond (fst v) (snd v))
(pure (fst v == snd v))
(fun _ -> ());
elim_pure (fst v == snd v);
intro_chan_inv_eqT c.chan_chan (fst v) (snd v);
Steel.SpinLock.release c.chan_lock;
recv c
)
else (
rewrite_slprop (chan_inv_cond (fst v) (snd v))
(chan_inv_step (snd v) (fst v))
(fun _ -> ());
recv_availableT c (fst v) (snd v) ()
)
let history_p' (#p:prot) (t:partial_trace_of p) (s:partial_trace_of p) : prop =
t `extended_to` s /\ True
let history_p (#p:prot) (t:partial_trace_of p) : MRef.stable_property extended_to =
history_p' t
let history (#p:prot) (c:chan p) (t:partial_trace_of p) : Type0 =
MRef.witnessed c.chan_chan.trace (history_p t)
let recall_trace_ref #q (r:trace_ref q) (tr tr':partial_trace_of q)
(tok:MRef.witnessed r (history_p tr))
: Steel unit
(MRef.pts_to r full_perm tr')
(fun _ -> MRef.pts_to r full_perm tr')
(requires fun _ -> True)
(ensures fun _ _ _ -> history_p tr tr')
= MRef.recall (history_p tr) r tr' tok | {
"checked_file": "/",
"dependencies": [
"Steel.Utils.fst.checked",
"Steel.SpinLock.fsti.checked",
"Steel.MonotonicHigherReference.fsti.checked",
"Steel.Memory.fsti.checked",
"Steel.HigherReference.fsti.checked",
"Steel.FractionalPermission.fst.checked",
"Steel.Effect.Atomic.fsti.checked",
"Steel.Effect.fsti.checked",
"Steel.Channel.Protocol.fst.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.Channel.Simplex.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.HigherReference",
"short_module": "H"
},
{
"abbrev": true,
"full_module": "Steel.MonotonicHigherReference",
"short_module": "MRef"
},
{
"abbrev": false,
"full_module": "Steel.FractionalPermission",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.HigherReference",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Effect",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Effect.Atomic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.SpinLock",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Channel.Protocol",
"short_module": "P"
},
{
"abbrev": false,
"full_module": "Steel.Effect",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Channel.Protocol",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Channel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.Channel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | cc: Steel.Channel.Simplex.chan q -> Steel.Effect.Steel Prims.unit | Steel.Effect.Steel | [] | [] | [
"Steel.Channel.Simplex.prot",
"Steel.Channel.Simplex.chan_val",
"Steel.Channel.Simplex.chan",
"Steel.Channel.Simplex.intro_in_state",
"Steel.Channel.Simplex.__proj__Mkchan_t__item__recv",
"Steel.Channel.Simplex.__proj__Mkchan__item__chan_chan",
"Prims.unit",
"Steel.Effect.Atomic.elim_pure",
"FStar.Ghost.hide",
"FStar.Set.set",
"Steel.Memory.iname",
"FStar.Set.empty",
"Steel.Channel.Simplex.in_state_prop",
"Steel.Effect.Atomic.rewrite_slprop",
"Steel.Channel.Simplex.in_state_slprop",
"FStar.Ghost.reveal",
"Steel.Memory.mem",
"Steel.HigherReference.higher_ref_pts_to_injective_eq",
"Steel.Channel.Simplex.half",
"FStar.Ghost.erased",
"Steel.Effect.Atomic.witness_exists",
"Steel.Effect.Common.star",
"Steel.HigherReference.pts_to",
"Steel.Effect.Common.vprop",
"Steel.Channel.Simplex.receiver",
"Steel.Effect.Common.rmem",
"Prims.l_True",
"Prims.eq2",
"Steel.Channel.Protocol.protocol",
"Steel.Channel.Simplex.step",
"Steel.Channel.Simplex.__proj__Mkchan_val__item__chan_prot",
"Steel.Channel.Simplex.__proj__Mkchan_val__item__chan_msg"
] | [] | false | true | false | false | false | let prot_equals #q (#p: _) (#vr: chan_val) (cc: chan q)
: Steel unit
((pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(fun _ -> (pts_to cc.chan_chan.recv half vr) `star` (receiver cc p))
(requires fun _ -> True)
(ensures fun _ _ _ -> step vr.chan_prot vr.chan_msg == p) =
| let vr' = witness_exists () in
H.higher_ref_pts_to_injective_eq #_ #_ #_ #_ #vr #_ cc.chan_chan.recv;
rewrite_slprop (in_state_slprop _ _) (in_state_slprop p vr) (fun _ -> ());
elim_pure _;
intro_in_state _ _ vr | false |
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_code_ClmulRev64High | val va_code_ClmulRev64High : va_dummy:unit -> Tot va_code | val va_code_ClmulRev64High : va_dummy:unit -> Tot va_code | let va_code_ClmulRev64High () =
(va_Block (va_CCons (va_code_VPolyMulHigh (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2)) (va_CCons (va_code_ShiftLeft128_1 ()) (va_CNil ())))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 80,
"end_line": 235,
"start_col": 0,
"start_line": 233
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ()))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft128_1 () =
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1) (va_pbool_and
(va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_ttrue ())))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft128_1 (va_mods:va_mods_t) (a:poly) : (va_quickCode unit
(va_code_ShiftLeft128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 60 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 2) 1) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 61 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (fun
(va_s:va_state) _ -> let (va_arg5:Vale.Math.Poly2_s.poly) = a in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 63 column 23 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_left_1 va_arg5) (va_QEmpty (()))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft128_1 va_b0 va_s0 a =
let (va_mods:va_mods_t) = [va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft128_1 va_mods a in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 49 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 58 column 37 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.shift a 1))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft128_1 a va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft128_1 (va_code_ShiftLeft128_1 ()) va_s0 a in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
//--
//-- ShiftLeft2_128_1
val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft2_128_1 () =
(va_Block (va_CCons (va_code_Vspltisw (va_op_vec_opr_vec 0) 0) (va_CCons (va_code_LoadImm64
(va_op_reg_opr_reg 10) 31) (va_CCons (va_code_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg
10)) (va_CCons (va_code_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_CCons (va_code_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4)
(va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 0) 1) (va_CCons (va_code_Vsl (va_op_vec_opr_vec
1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_CNil ())))))))))))
val va_codegen_success_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft2_128_1 () =
(va_pbool_and (va_codegen_success_Vspltisw (va_op_vec_opr_vec 0) 0) (va_pbool_and
(va_codegen_success_LoadImm64 (va_op_reg_opr_reg 10) 31) (va_pbool_and
(va_codegen_success_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10)) (va_pbool_and
(va_codegen_success_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_pbool_and (va_codegen_success_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0)
(va_op_vec_opr_vec 3) 4) (va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 0) 1)
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vxor
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_ttrue ()))))))))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft2_128_1 (va_mods:va_mods_t) (lo:poly) (hi:poly) : (va_quickCode unit
(va_code_ShiftLeft2_128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 83 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisw (va_op_vec_opr_vec 0) 0) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 84 column 14 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_LoadImm64 (va_op_reg_opr_reg 10) 31) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 85 column 12 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 86 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3)) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 87 column 11 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 88 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 0) 1) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 89 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 90 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_QBind
va_range1
"***** PRECONDITION NOT MET AT line 91 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vxor (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (fun
(va_s:va_state) _ -> let (l:(Vale.Def.Words_s.four Vale.Def.Words_s.nat32)) =
Vale.Def.Words.Four_s.four_map #nat32 #Vale.Def.Words_s.nat32 (fun (i:nat32) ->
Vale.Arch.Types.ishl32 i 1) (va_get_vec 2 va_old_s) in let (r:(Vale.Def.Words_s.four
Vale.Def.Words_s.nat32)) = Vale.Def.Words.Four_s.four_map #nat32 #Vale.Def.Words_s.nat32 (fun
(i:nat32) -> Vale.Arch.Types.ishr32 i 31) (va_get_vec 2 va_old_s) in let
(va_arg22:Vale.Def.Types_s.quad32) = va_get_vec 3 va_s in let
(va_arg21:Vale.Def.Types_s.quad32) = Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0
(Vale.Def.Words_s.__proj__Mkfour__item__lo0 r) (Vale.Def.Words_s.__proj__Mkfour__item__lo1 r)
(Vale.Def.Words_s.__proj__Mkfour__item__hi2 r) in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 95 column 30 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.Arch.TypesNative.lemma_quad32_xor_commutes va_arg21 va_arg22) (let
(va_arg20:Vale.Def.Types_s.quad32) = va_get_vec 3 va_s in let
(va_arg19:Vale.Def.Types_s.quad32) = Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0
(Vale.Def.Words_s.__proj__Mkfour__item__lo0 r) (Vale.Def.Words_s.__proj__Mkfour__item__lo1 r)
(Vale.Def.Words_s.__proj__Mkfour__item__hi2 r) in let (va_arg18:Vale.Def.Types_s.quad32) = l in
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 96 column 32 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.Arch.TypesNative.lemma_quad32_xor_associates va_arg18 va_arg19 va_arg20)
(let (va_arg17:Vale.Math.Poly2_s.poly) = hi in let (va_arg16:Vale.Math.Poly2_s.poly) = lo in
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 98 column 25 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_2_left_1 va_arg16 va_arg17) (va_QEmpty
(())))))))))))))))
val va_lemma_ShiftLeft2_128_1 : va_b0:va_code -> va_s0:va_state -> lo:poly -> hi:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ShiftLeft2_128_1 ()) va_s0 /\ va_get_ok va_s0 /\
Vale.Math.Poly2_s.degree hi < 127 /\ Vale.Math.Poly2_s.degree lo <= 127 /\ va_get_vec 1 va_s0
== Vale.Math.Poly2.Bits_s.to_quad32 lo /\ va_get_vec 2 va_s0 ==
Vale.Math.Poly2.Bits_s.to_quad32 hi))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let n = Vale.Math.Poly2_s.monomial 128 in let a = Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul
hi n) lo in let b = Vale.Math.Poly2_s.shift a 1 in va_get_vec 1 va_sM ==
Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n) /\ va_get_vec 2 va_sM ==
Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)) /\ va_state_eq va_sM
(va_update_vec 3 va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_vec 0 va_sM
(va_update_reg 10 va_sM (va_update_ok va_sM va_s0))))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft2_128_1 va_b0 va_s0 lo hi =
let (va_mods:va_mods_t) = [va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10;
va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft2_128_1 va_mods lo hi in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft2_128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 66 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let n = Vale.Math.Poly2_s.monomial 128 in label va_range1
"***** POSTCONDITION NOT MET AT line 78 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let a = Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul hi n) lo in label va_range1
"***** POSTCONDITION NOT MET AT line 79 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let b = Vale.Math.Poly2_s.shift a 1 in label va_range1
"***** POSTCONDITION NOT MET AT line 80 column 35 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n)) /\ label
va_range1
"***** POSTCONDITION NOT MET AT line 81 column 35 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 2 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)))))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10;
va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ShiftLeft2_128_1 (lo:poly) (hi:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0))
: Type0 =
(va_get_ok va_s0 /\ Vale.Math.Poly2_s.degree hi < 127 /\ Vale.Math.Poly2_s.degree lo <= 127 /\
va_get_vec 1 va_s0 == Vale.Math.Poly2.Bits_s.to_quad32 lo /\ va_get_vec 2 va_s0 ==
Vale.Math.Poly2.Bits_s.to_quad32 hi /\ (forall (va_x_r10:nat64) (va_x_v0:quad32)
(va_x_v1:quad32) (va_x_v2:quad32) (va_x_v3:quad32) . let va_sM = va_upd_vec 3 va_x_v3
(va_upd_vec 2 va_x_v2 (va_upd_vec 1 va_x_v1 (va_upd_vec 0 va_x_v0 (va_upd_reg 10 va_x_r10
va_s0)))) in va_get_ok va_sM /\ (let n = Vale.Math.Poly2_s.monomial 128 in let a =
Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul hi n) lo in let b = Vale.Math.Poly2_s.shift a 1 in
va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n) /\
va_get_vec 2 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)) ==> va_k
va_sM (())))
val va_wpProof_ShiftLeft2_128_1 : lo:poly -> hi:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ShiftLeft2_128_1 lo hi va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ShiftLeft2_128_1 ()) ([va_Mod_vec 3;
va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft2_128_1 lo hi va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft2_128_1 (va_code_ShiftLeft2_128_1 ()) va_s0 lo hi in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 3 va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM
(va_update_vec 0 va_sM (va_update_reg 10 va_sM (va_update_ok va_sM va_s0)))))));
va_lemma_norm_mods ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10])
va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ShiftLeft2_128_1 (lo:poly) (hi:poly) : (va_quickCode unit (va_code_ShiftLeft2_128_1
())) =
(va_QProc (va_code_ShiftLeft2_128_1 ()) ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0;
va_Mod_reg 10]) (va_wp_ShiftLeft2_128_1 lo hi) (va_wpProof_ShiftLeft2_128_1 lo hi))
//--
//-- ClmulRev64High
val va_code_ClmulRev64High : va_dummy:unit -> Tot va_code | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_code | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_Block",
"Vale.PPC64LE.Decls.va_CCons",
"Vale.AES.PPC64LE.PolyOps.va_code_VPolyMulHigh",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.AES.PPC64LE.GF128_Mul.va_code_ShiftLeft128_1",
"Vale.PPC64LE.Decls.va_CNil",
"Vale.PPC64LE.Decls.va_code"
] | [] | false | false | false | true | false | let va_code_ClmulRev64High () =
| (va_Block (va_CCons (va_code_VPolyMulHigh (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2))
(va_CCons (va_code_ShiftLeft128_1 ()) (va_CNil ())))) | false |
Vale.AES.PPC64LE.GF128_Mul.fst | Vale.AES.PPC64LE.GF128_Mul.va_codegen_success_ClmulRev64High | val va_codegen_success_ClmulRev64High : va_dummy:unit -> Tot va_pbool | val va_codegen_success_ClmulRev64High : va_dummy:unit -> Tot va_pbool | let va_codegen_success_ClmulRev64High () =
(va_pbool_and (va_codegen_success_VPolyMulHigh (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2)) (va_pbool_and (va_codegen_success_ShiftLeft128_1 ()) (va_ttrue ()))) | {
"file_name": "obj/Vale.AES.PPC64LE.GF128_Mul.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 95,
"end_line": 241,
"start_col": 0,
"start_line": 239
} | module Vale.AES.PPC64LE.GF128_Mul
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.TypesNative
open Vale.Math.Poly2_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2.Bits
open Vale.Math.Poly2.Lemmas
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.State
open Vale.PPC64LE.Decls
open Vale.PPC64LE.InsBasic
open Vale.PPC64LE.InsMem
open Vale.PPC64LE.InsVector
open Vale.PPC64LE.QuickCode
open Vale.PPC64LE.QuickCodes
open Vale.AES.PPC64LE.PolyOps
open Vale.AES.Types_helpers
open Vale.AES.GHash_BE
//-- ShiftLeft128_1
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft128_1 () =
(va_Block (va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 2) 1) (va_CCons (va_code_Vsl
(va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (va_CNil ()))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft128_1 () =
(va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 2) 1) (va_pbool_and
(va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2))
(va_ttrue ())))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft128_1 (va_mods:va_mods_t) (a:poly) : (va_quickCode unit
(va_code_ShiftLeft128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 60 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 2) 1) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 61 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 2)) (fun
(va_s:va_state) _ -> let (va_arg5:Vale.Math.Poly2_s.poly) = a in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 63 column 23 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_left_1 va_arg5) (va_QEmpty (()))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft128_1 va_b0 va_s0 a =
let (va_mods:va_mods_t) = [va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft128_1 va_mods a in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 49 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 58 column 37 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.shift a 1))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft128_1 a va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft128_1 (va_code_ShiftLeft128_1 ()) va_s0 a in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_vec 2; va_Mod_vec 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
//--
//-- ShiftLeft2_128_1
val va_code_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ShiftLeft2_128_1 () =
(va_Block (va_CCons (va_code_Vspltisw (va_op_vec_opr_vec 0) 0) (va_CCons (va_code_LoadImm64
(va_op_reg_opr_reg 10) 31) (va_CCons (va_code_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg
10)) (va_CCons (va_code_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_CCons (va_code_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4)
(va_CCons (va_code_Vspltisb (va_op_vec_opr_vec 0) 1) (va_CCons (va_code_Vsl (va_op_vec_opr_vec
1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_CCons (va_code_Vxor (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_CNil ())))))))))))
val va_codegen_success_ShiftLeft2_128_1 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ShiftLeft2_128_1 () =
(va_pbool_and (va_codegen_success_Vspltisw (va_op_vec_opr_vec 0) 0) (va_pbool_and
(va_codegen_success_LoadImm64 (va_op_reg_opr_reg 10) 31) (va_pbool_and
(va_codegen_success_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10)) (va_pbool_and
(va_codegen_success_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3))
(va_pbool_and (va_codegen_success_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0)
(va_op_vec_opr_vec 3) 4) (va_pbool_and (va_codegen_success_Vspltisb (va_op_vec_opr_vec 0) 1)
(va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vsl (va_op_vec_opr_vec 2)
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_pbool_and (va_codegen_success_Vxor
(va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (va_ttrue ()))))))))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ShiftLeft2_128_1 (va_mods:va_mods_t) (lo:poly) (hi:poly) : (va_quickCode unit
(va_code_ShiftLeft2_128_1 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 83 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisw (va_op_vec_opr_vec 0) 0) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 84 column 14 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_LoadImm64 (va_op_reg_opr_reg 10) 31) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 85 column 12 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Mtvsrws (va_op_vec_opr_vec 3) (va_op_reg_opr_reg 10)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 86 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsrw (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 3)) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 87 column 11 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsldoi (va_op_vec_opr_vec 3) (va_op_vec_opr_vec 0) (va_op_vec_opr_vec 3) 4) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 88 column 13 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vspltisb (va_op_vec_opr_vec 0) 1) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 89 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 0)) (va_QSeq
va_range1
"***** PRECONDITION NOT MET AT line 90 column 8 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vsl (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 0)) (va_QBind
va_range1
"***** PRECONDITION NOT MET AT line 91 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_quick_Vxor (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 2) (va_op_vec_opr_vec 3)) (fun
(va_s:va_state) _ -> let (l:(Vale.Def.Words_s.four Vale.Def.Words_s.nat32)) =
Vale.Def.Words.Four_s.four_map #nat32 #Vale.Def.Words_s.nat32 (fun (i:nat32) ->
Vale.Arch.Types.ishl32 i 1) (va_get_vec 2 va_old_s) in let (r:(Vale.Def.Words_s.four
Vale.Def.Words_s.nat32)) = Vale.Def.Words.Four_s.four_map #nat32 #Vale.Def.Words_s.nat32 (fun
(i:nat32) -> Vale.Arch.Types.ishr32 i 31) (va_get_vec 2 va_old_s) in let
(va_arg22:Vale.Def.Types_s.quad32) = va_get_vec 3 va_s in let
(va_arg21:Vale.Def.Types_s.quad32) = Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0
(Vale.Def.Words_s.__proj__Mkfour__item__lo0 r) (Vale.Def.Words_s.__proj__Mkfour__item__lo1 r)
(Vale.Def.Words_s.__proj__Mkfour__item__hi2 r) in va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 95 column 30 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.Arch.TypesNative.lemma_quad32_xor_commutes va_arg21 va_arg22) (let
(va_arg20:Vale.Def.Types_s.quad32) = va_get_vec 3 va_s in let
(va_arg19:Vale.Def.Types_s.quad32) = Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0
(Vale.Def.Words_s.__proj__Mkfour__item__lo0 r) (Vale.Def.Words_s.__proj__Mkfour__item__lo1 r)
(Vale.Def.Words_s.__proj__Mkfour__item__hi2 r) in let (va_arg18:Vale.Def.Types_s.quad32) = l in
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 96 column 32 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.Arch.TypesNative.lemma_quad32_xor_associates va_arg18 va_arg19 va_arg20)
(let (va_arg17:Vale.Math.Poly2_s.poly) = hi in let (va_arg16:Vale.Math.Poly2_s.poly) = lo in
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 98 column 25 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(fun (_:unit) -> Vale.AES.GF128.lemma_shift_2_left_1 va_arg16 va_arg17) (va_QEmpty
(())))))))))))))))
val va_lemma_ShiftLeft2_128_1 : va_b0:va_code -> va_s0:va_state -> lo:poly -> hi:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ShiftLeft2_128_1 ()) va_s0 /\ va_get_ok va_s0 /\
Vale.Math.Poly2_s.degree hi < 127 /\ Vale.Math.Poly2_s.degree lo <= 127 /\ va_get_vec 1 va_s0
== Vale.Math.Poly2.Bits_s.to_quad32 lo /\ va_get_vec 2 va_s0 ==
Vale.Math.Poly2.Bits_s.to_quad32 hi))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let n = Vale.Math.Poly2_s.monomial 128 in let a = Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul
hi n) lo in let b = Vale.Math.Poly2_s.shift a 1 in va_get_vec 1 va_sM ==
Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n) /\ va_get_vec 2 va_sM ==
Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)) /\ va_state_eq va_sM
(va_update_vec 3 va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM (va_update_vec 0 va_sM
(va_update_reg 10 va_sM (va_update_ok va_sM va_s0))))))))
[@"opaque_to_smt"]
let va_lemma_ShiftLeft2_128_1 va_b0 va_s0 lo hi =
let (va_mods:va_mods_t) = [va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10;
va_Mod_ok] in
let va_qc = va_qcode_ShiftLeft2_128_1 va_mods lo hi in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ShiftLeft2_128_1 ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 66 column 1 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let n = Vale.Math.Poly2_s.monomial 128 in label va_range1
"***** POSTCONDITION NOT MET AT line 78 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let a = Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul hi n) lo in label va_range1
"***** POSTCONDITION NOT MET AT line 79 column 9 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(let b = Vale.Math.Poly2_s.shift a 1 in label va_range1
"***** POSTCONDITION NOT MET AT line 80 column 35 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n)) /\ label
va_range1
"***** POSTCONDITION NOT MET AT line 81 column 35 of file /home/gebner/fstar_dataset/projects/hacl-star/vale/code/crypto/aes/ppc64le/Vale.AES.PPC64LE.GF128_Mul.vaf *****"
(va_get_vec 2 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)))))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10;
va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ShiftLeft2_128_1 (lo:poly) (hi:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0))
: Type0 =
(va_get_ok va_s0 /\ Vale.Math.Poly2_s.degree hi < 127 /\ Vale.Math.Poly2_s.degree lo <= 127 /\
va_get_vec 1 va_s0 == Vale.Math.Poly2.Bits_s.to_quad32 lo /\ va_get_vec 2 va_s0 ==
Vale.Math.Poly2.Bits_s.to_quad32 hi /\ (forall (va_x_r10:nat64) (va_x_v0:quad32)
(va_x_v1:quad32) (va_x_v2:quad32) (va_x_v3:quad32) . let va_sM = va_upd_vec 3 va_x_v3
(va_upd_vec 2 va_x_v2 (va_upd_vec 1 va_x_v1 (va_upd_vec 0 va_x_v0 (va_upd_reg 10 va_x_r10
va_s0)))) in va_get_ok va_sM /\ (let n = Vale.Math.Poly2_s.monomial 128 in let a =
Vale.Math.Poly2_s.add (Vale.Math.Poly2_s.mul hi n) lo in let b = Vale.Math.Poly2_s.shift a 1 in
va_get_vec 1 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.mod b n) /\
va_get_vec 2 va_sM == Vale.Math.Poly2.Bits_s.to_quad32 (Vale.Math.Poly2_s.div b n)) ==> va_k
va_sM (())))
val va_wpProof_ShiftLeft2_128_1 : lo:poly -> hi:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ShiftLeft2_128_1 lo hi va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ShiftLeft2_128_1 ()) ([va_Mod_vec 3;
va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ShiftLeft2_128_1 lo hi va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ShiftLeft2_128_1 (va_code_ShiftLeft2_128_1 ()) va_s0 lo hi in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_vec 3 va_sM (va_update_vec 2 va_sM (va_update_vec 1 va_sM
(va_update_vec 0 va_sM (va_update_reg 10 va_sM (va_update_ok va_sM va_s0)))))));
va_lemma_norm_mods ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0; va_Mod_reg 10])
va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ShiftLeft2_128_1 (lo:poly) (hi:poly) : (va_quickCode unit (va_code_ShiftLeft2_128_1
())) =
(va_QProc (va_code_ShiftLeft2_128_1 ()) ([va_Mod_vec 3; va_Mod_vec 2; va_Mod_vec 1; va_Mod_vec 0;
va_Mod_reg 10]) (va_wp_ShiftLeft2_128_1 lo hi) (va_wpProof_ShiftLeft2_128_1 lo hi))
//--
//-- ClmulRev64High
val va_code_ClmulRev64High : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ClmulRev64High () =
(va_Block (va_CCons (va_code_VPolyMulHigh (va_op_vec_opr_vec 1) (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2)) (va_CCons (va_code_ShiftLeft128_1 ()) (va_CNil ()))))
val va_codegen_success_ClmulRev64High : va_dummy:unit -> Tot va_pbool | {
"checked_file": "/",
"dependencies": [
"Vale.PPC64LE.State.fsti.checked",
"Vale.PPC64LE.QuickCodes.fsti.checked",
"Vale.PPC64LE.QuickCode.fst.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.PPC64LE.InsVector.fsti.checked",
"Vale.PPC64LE.InsMem.fsti.checked",
"Vale.PPC64LE.InsBasic.fsti.checked",
"Vale.PPC64LE.Decls.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Words.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.Bits.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.TypesNative.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.PPC64LE.PolyOps.fsti.checked",
"Vale.AES.GHash_BE.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.PPC64LE.GF128_Mul.fst"
} | [
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_BE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE.PolyOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.TypesNative",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | va_dummy: Prims.unit -> Vale.PPC64LE.Decls.va_pbool | Prims.Tot | [
"total"
] | [] | [
"Prims.unit",
"Vale.PPC64LE.Decls.va_pbool_and",
"Vale.AES.PPC64LE.PolyOps.va_codegen_success_VPolyMulHigh",
"Vale.PPC64LE.Decls.va_op_vec_opr_vec",
"Vale.AES.PPC64LE.GF128_Mul.va_codegen_success_ShiftLeft128_1",
"Vale.PPC64LE.Decls.va_ttrue",
"Vale.PPC64LE.Decls.va_pbool"
] | [] | false | false | false | true | false | let va_codegen_success_ClmulRev64High () =
| (va_pbool_and (va_codegen_success_VPolyMulHigh (va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 1)
(va_op_vec_opr_vec 2))
(va_pbool_and (va_codegen_success_ShiftLeft128_1 ()) (va_ttrue ()))) | false |