Dataset columns, with their value classes or length ranges:

| Column | Type | Classes / lengths |
|---|---|---|
| effect | string | 48 classes |
| original_source_type | string | lengths 0 to 23k |
| opens_and_abbrevs | list | lengths 2 to 92 |
| isa_cross_project_example | bool | 1 class |
| source_definition | string | lengths 9 to 57.9k |
| partial_definition | string | lengths 7 to 23.3k |
| is_div | bool | 2 classes |
| is_type | null | |
| is_proof | bool | 2 classes |
| completed_definiton | string | lengths 1 to 250k |
| dependencies | dict | |
| effect_flags | sequence | lengths 0 to 2 |
| ideal_premises | sequence | lengths 0 to 236 |
| mutual_with | sequence | lengths 0 to 11 |
| file_context | string | lengths 0 to 407k |
| interleaved | bool | 1 class |
| is_simply_typed | bool | 2 classes |
| file_name | string | lengths 5 to 48 |
| vconfig | dict | |
| is_simple_lemma | null | |
| source_type | string | lengths 10 to 23k |
| proof_features | sequence | lengths 0 to 1 |
| name | string | lengths 8 to 95 |
| source | dict | |
| verbose_type | string | lengths 1 to 7.42k |
| source_range | dict | |

The records that follow are example rows from this dataset, all drawn from `LowStar.RVector.fst`, with each row's cells flattened into pipe-separated text.
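These rows are easier to consume programmatically than to read inline. As a minimal sketch of how the fields fit together, here is a short Python example that scans records shaped like the rows below and pulls out the pieces a definition-completion pipeline typically needs: the `val` signature in `source_type`, the ground-truth `let` body in `source_definition`, and the module context in `opens_and_abbrevs`. The file name `fstar_records.jsonl`, the JSON-Lines format, the filter, and the helper names are assumptions for illustration only; they are not part of this dataset's documentation.

```python
import json

# Assumed local dump: one JSON object per line, with the fields listed in the
# column table above (path and format are hypothetical, for illustration only).
DATA_PATH = "fstar_records.jsonl"


def load_records(path):
    """Yield one record (dict) per line of a JSON-Lines file."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def summarize(record):
    """Collect the fields most relevant for a definition-completion prompt."""
    return {
        "name": record.get("name"),
        "file": record.get("file_name"),
        "effect": record.get("effect"),
        "is_proof": record.get("is_proof"),
        "declaration": record.get("source_type"),       # the `val ...` signature
        "definition": record.get("source_definition"),  # the ground-truth `let ...` body
        "opens": [o["full_module"] for o in record.get("opens_and_abbrevs", [])],
    }


if __name__ == "__main__":
    # Example filter: print a one-line summary of every non-proof GTot definition.
    for rec in load_records(DATA_PATH):
        if rec.get("effect") == "Prims.GTot" and not rec.get("is_proof"):
            s = summarize(rec)
            print(f'{s["name"]} ({s["file"]}): {len(s["opens"])} opens/abbrevs')
```

Run against a dump containing the rows below, this would print lines such as `LowStar.RVector.rv_inv (LowStar.RVector.fst): 16 opens/abbrevs`.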
Prims.GTot | val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv | val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv = | false | null | false | elems_inv h rv /\ elems_reg h rv /\ rv_itself_inv h rv | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"Prims.l_and",
"LowStar.RVector.elems_inv",
"LowStar.RVector.elems_reg",
"LowStar.RVector.rv_itself_inv"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0 | [] | LowStar.RVector.rv_inv | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: FStar.Monotonic.HyperStack.mem -> rv: LowStar.RVector.rvector rg -> Prims.GTot Type0 | {
"end_col": 20,
"end_line": 135,
"start_col": 2,
"start_line": 133
} |
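The record above carries the full `opens_and_abbrevs` list for `rv_inv`. As a small illustrative sketch (not part of the dataset's tooling), the function below renders such a list back into the `open`/`module` header lines visible in `file_context`; note that entries such as `Prims`, `FStar.Pervasives`, and the enclosing `LowStar`/`FStar` namespaces are in scope implicitly in F* and do not appear as literal `open` lines in the source.

```python
def render_opens(opens_and_abbrevs):
    """Render dataset `opens_and_abbrevs` entries as F* header lines.

    An entry with "abbrev": true becomes `module <short> = <full>`;
    an entry with "abbrev": false becomes `open <full>`.
    """
    lines = []
    for entry in opens_and_abbrevs:
        if entry.get("abbrev"):
            lines.append(f'module {entry["short_module"]} = {entry["full_module"]}')
        else:
            lines.append(f'open {entry["full_module"]}')
    return lines


# The first two entries of the record above render as:
#   module U32 = FStar.UInt32
#   module V = LowStar.Vector
print("\n".join(render_opens([
    {"abbrev": True, "full_module": "FStar.UInt32", "short_module": "U32"},
    {"abbrev": True, "full_module": "LowStar.Vector", "short_module": "V"},
])))
```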
Prims.GTot | val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h) | val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j = | false | null | false | V.forall_seq rs i j (rg_inv rg h) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.Vector.forall_seq",
"LowStar.Regional.rg_inv"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | [] | LowStar.RVector.rs_elems_inv | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> Prims.GTot Type0 | {
"end_col": 35,
"end_line": 77,
"start_col": 2,
"start_line": 77
} |
Prims.GTot | val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2)) | val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j = | false | null | false | V.forall_seq rs i j (fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j (fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1) (Rgl?.region_of rg v2)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.l_and",
"LowStar.Vector.forall_seq",
"FStar.Monotonic.HyperHeap.extends",
"LowStar.Regional.__proj__Rgl__item__region_of",
"LowStar.Vector.forall2_seq",
"FStar.Monotonic.HyperHeap.disjoint"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0 | [] | LowStar.RVector.rs_elems_reg | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
prid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> Prims.GTot Type0 | {
"end_col": 53,
"end_line": 104,
"start_col": 2,
"start_line": 100
} |
Prims.GTot | val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv) | val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv = | false | null | false | loc_all_regions_from false (V.frameOf rv) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Vector.frameOf",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc | [] | LowStar.RVector.loc_rvector | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 43,
"end_line": 65,
"start_col": 2,
"start_line": 65
} |
Prims.GTot | val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r))) | val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r = | false | null | false | B.loc_regions preserve_liveness
(Set.intersect (HS.mod_set (Set.singleton r)) (Set.complement (Set.singleton r))) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"Prims.bool",
"FStar.Monotonic.HyperHeap.rid",
"LowStar.Monotonic.Buffer.loc_regions",
"FStar.Set.intersect",
"FStar.Monotonic.HyperHeap.mod_set",
"FStar.Set.singleton",
"FStar.Set.complement",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc | [] | LowStar.RVector.loc_all_exts_from | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | preserve_liveness: Prims.bool -> r: FStar.Monotonic.HyperHeap.rid
-> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 41,
"end_line": 241,
"start_col": 2,
"start_line": 237
} |
Prims.GTot | val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i)) | val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i = | false | null | false | loc_all_regions_from false (Rgl?.region_of rg (S.index rs i)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Regional.__proj__Rgl__item__region_of",
"FStar.Seq.Base.index",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc | [] | LowStar.RVector.rs_loc_elem | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat{i < FStar.Seq.Base.length rs}
-> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 63,
"end_line": 166,
"start_col": 2,
"start_line": 166
} |
Prims.GTot | val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1) | val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i = | false | null | false | rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i + 1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_loc_elems",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"FStar.Integers.op_Plus",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc | [] | LowStar.RVector.rv_loc_elem | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t{i < LowStar.Vector.size_of rv}
-> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 55,
"end_line": 191,
"start_col": 2,
"start_line": 191
} |
Prims.GTot | val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv) | val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv = | false | null | false | rv_elems_inv h rv 0ul (V.size_of rv) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.RVector.rv_elems_inv",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
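// That is, `rg_inv rg h` holds for every element of `rs` with index in [i, j).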
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | [] | LowStar.RVector.elems_inv | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: FStar.Monotonic.HyperStack.mem -> rv: LowStar.RVector.rvector rg -> Prims.GTot Type0 | {
"end_col": 38,
"end_line": 92,
"start_col": 2,
"start_line": 92
} |
FStar.Pervasives.Lemma | val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j = | false | null | true | rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_loc_elems_parent_disj",
"LowStar.Vector.as_seq",
"LowStar.Vector.frameOf",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
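// Proof sketch: the two asserts unfold `rs_loc_elems` on [i, j) and [i+1, j)
// by one step, the recursive call rewrites the prefix [i, j-1), and
// `loc_union_assoc` re-associates the three-way union into the stated form.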
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
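// The region set above is the modification set of `r` (which contains `r` and
// every region extending it) intersected with the complement of `{r}`, i.e.
// all strict descendants of `r` but not `r` itself.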
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv)))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv)))) | [] | LowStar.RVector.rv_loc_elems_parent_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rv_elems_reg h rv i j)
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rv_loc_elems h rv i j)
(LowStar.Monotonic.Buffer.loc_region_only false (LowStar.Vector.frameOf rv))) | {
"end_col": 80,
"end_line": 368,
"start_col": 2,
"start_line": 368
} |
Prims.GTot | val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j) | val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j = | false | null | false | rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_loc_elems",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
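// The footprint of a single element: its region together with every region
// allocated below that region.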
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc | [] | LowStar.RVector.rv_loc_elems | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 53,
"end_line": 183,
"start_col": 2,
"start_line": 183
} |
FStar.Pervasives.Lemma | val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j) | val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid = | false | null | true | rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"FStar.Monotonic.HyperHeap.rid",
"LowStar.RVector.rs_loc_elems_each_disj",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
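// By induction on j: when each element's region is disjoint from `drid`, the
// union of the element footprints is disjoint from everything under `drid`.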
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid))) | [] | LowStar.RVector.rv_loc_elems_each_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv} ->
drid: FStar.Monotonic.HyperHeap.rid
-> FStar.Pervasives.Lemma
(requires
LowStar.Vector.forall_ h
rv
i
j
(fun r -> FStar.Monotonic.HyperHeap.disjoint (Rgl?.region_of rg r) drid))
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rv_loc_elems h rv i j)
(LowStar.Monotonic.Buffer.loc_all_regions_from false drid)) | {
"end_col": 68,
"end_line": 393,
"start_col": 2,
"start_line": 393
} |
Prims.GTot | val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j) | val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j = | false | null | false | rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_elems_inv",
"LowStar.Vector.as_seq",
"FStar.UInt32.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | [] | LowStar.RVector.rv_elems_inv | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> Prims.GTot Type0 | {
"end_col": 55,
"end_line": 85,
"start_col": 2,
"start_line": 85
} |
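A minimal usage sketch for the rv_elems_inv record above (rv_prefix_inv is a hypothetical helper name; the snippet assumes the opens and abbreviations of LowStar.RVector shown in the file_context and has not been checked against the library): instantiating rv_elems_inv on a prefix of the vector.
(* Sketch only, not part of LowStar.RVector. *)
let rv_prefix_inv (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (h:HS.mem) (rv:rvector rg) (j:uint32_t{j <= V.size_of rv})
  : GTot Type0
= rv_elems_inv h rv 0ul j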
FStar.HyperStack.ST.ST | val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let alloc_empty #a #rst rg =
V.alloc_empty a | val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg = | true | null | false | V.alloc_empty a | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.Vector.alloc_empty",
"LowStar.RVector.rvector"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul)) | [] | LowStar.RVector.alloc_empty | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rg: LowStar.Regional.regional rst a -> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 17,
"end_line": 692,
"start_col": 2,
"start_line": 692
} |
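A minimal usage sketch for the alloc_empty record above (alloc_empty_example is a hypothetical wrapper name; unchecked against the library): calling alloc_empty and propagating its postcondition unchanged.
(* Sketch only, not part of LowStar.RVector. *)
let alloc_empty_example (#a:Type0) (#rst:Type) (rg:regional rst a)
  : HST.ST (rvector rg)
    (requires (fun _ -> True))
    (ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
= alloc_empty rg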
FStar.Pervasives.Lemma | val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1 | val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 = | false | null | true | rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rs_elems_inv_preserved",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j)) | [] | LowStar.RVector.rv_elems_inv_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv} ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.Vector.live h0 rv /\ LowStar.RVector.rv_elems_inv h0 rv i j /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.Vector.loc_vector rv) /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.rv_loc_elems h0 rv i j) /\
LowStar.Monotonic.Buffer.modifies p h0 h1) (ensures LowStar.RVector.rv_elems_inv h1 rv i j) | {
"end_col": 72,
"end_line": 440,
"start_col": 2,
"start_line": 440
} |
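A minimal usage sketch for the rv_elems_inv_preserved record above (full_elems_inv_preserved is a hypothetical wrapper name; unchecked against the library): instantiating the preservation lemma at the full index range 0ul .. V.size_of rv.
(* Sketch only, not part of LowStar.RVector. *)
let full_elems_inv_preserved (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (rv:rvector rg) (p:loc) (h0 h1:HS.mem)
  : Lemma (requires (V.live h0 rv /\
                     rv_elems_inv h0 rv 0ul (V.size_of rv) /\
                     loc_disjoint p (V.loc_vector rv) /\
                     loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
                     modifies p h0 h1))
          (ensures (rv_elems_inv h1 rv 0ul (V.size_of rv)))
= rv_elems_inv_preserved rv 0ul (V.size_of rv) p h0 h1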
FStar.Pervasives.Lemma | val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j = | false | null | true | rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_loc_elems_included",
"LowStar.Vector.as_seq",
"LowStar.Vector.frameOf",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j))) | [] | LowStar.RVector.rv_loc_elems_included | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rv_elems_reg h rv i j)
(ensures
LowStar.Monotonic.Buffer.loc_includes (LowStar.RVector.loc_all_exts_from false
(LowStar.Vector.frameOf rv))
(LowStar.RVector.rv_loc_elems h rv i j)) | {
"end_col": 77,
"end_line": 273,
"start_col": 2,
"start_line": 273
} |
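For orientation between rows: the lemma in the row above, rv_loc_elems_included, is discharged in the source file by a one-line delegation to its sequence-level counterpart. The body below is copied from the file_context reproduced in later rows (it is not an extra cell of this row): it unfolds rv_loc_elems to rs_loc_elems over V.as_seq h rv and applies rs_loc_elems_included at the vector's frame.

let rv_loc_elems_included #a #rst #rg h rv i j =
  rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)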
Prims.GTot | val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv) | val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv = | false | null | false | rv_elems_reg h rv 0ul (V.size_of rv) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.RVector.rv_elems_reg",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0 | [] | LowStar.RVector.elems_reg | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: FStar.Monotonic.HyperStack.mem -> rv: LowStar.RVector.rvector rg -> Prims.GTot Type0 | {
"end_col": 38,
"end_line": 119,
"start_col": 2,
"start_line": 119
} |
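The definition recorded in this row is a one-liner; unfolding it against rv_elems_reg and rs_elems_reg (both visible in the file_context above) makes its meaning explicit. This is a worked unfolding added for readability, not a definition present in the file:

  elems_reg h rv
  == rv_elems_reg h rv 0ul (V.size_of rv)
  == rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) 0 (U32.v (V.size_of rv))

That is, every element of rv lives in a region that extends the vector's frame, and the regions of any two distinct elements are disjoint.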
FStar.Pervasives.Lemma | val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1 | val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 = | false | null | true | assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rv_inv_preserved_",
"Prims.unit",
"Prims._assert",
"LowStar.Monotonic.Buffer.loc_includes",
"LowStar.RVector.loc_rvector",
"LowStar.RVector.rv_loc_elems",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"LowStar.RVector.rv_loc_elems_included",
"LowStar.Vector.loc_vector"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | [] | LowStar.RVector.rv_inv_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rv_inv h0 rv /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.loc_rvector rv) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures LowStar.RVector.rv_inv h1 rv)
[
SMTPat (LowStar.RVector.rv_inv h0 rv);
SMTPat (LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.loc_rvector rv));
SMTPat (LowStar.Monotonic.Buffer.modifies p h0 h1)
] | {
"end_col": 30,
"end_line": 468,
"start_col": 2,
"start_line": 465
} |
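The row above records rv_inv_preserved, the framing lemma of rvector. A minimal client-side sketch of its use follows, assuming the module's opens (LowStar.Modifies, LowStar.Regional, LowStar.Vector); the lemma name frame_with_disjoint_buffer and the buffer parameter b are illustrative only and do not exist in the library.

let frame_with_disjoint_buffer (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (rv:rvector rg) (#t:Type0) (b:B.buffer t) (h0 h1:HS.mem)
  : Lemma (requires (rv_inv h0 rv /\
                     loc_disjoint (B.loc_buffer b) (loc_rvector rv) /\
                     modifies (B.loc_buffer b) h0 h1))
          (ensures  (rv_inv h1 rv))
  = // Writing to a buffer disjoint from loc_rvector rv cannot break the
    // invariant; the SMT patterns on rv_inv_preserved usually discharge this
    // automatically, the explicit call just makes the framing step visible.
    rv_inv_preserved rv (B.loc_buffer b) h0 h1

In stateful code the same framing typically fires implicitly after an update to such a buffer, thanks to the [SMTPat ...] triggers attached to rv_inv_preserved.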
Prims.GTot | val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j) | val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j = | false | null | false | as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial",
""
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rv_elems_inv",
"LowStar.RVector.as_seq_seq",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"FStar.Seq.Base.seq",
"LowStar.Regional.__proj__Rgl__item__repr",
"Prims.op_Equality",
"Prims.int",
"FStar.Seq.Base.length",
"FStar.Integers.op_Subtraction",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j)) | [] | LowStar.RVector.as_seq_sub | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j:
LowStar.Vector.uint32_t
{i <= j /\ j <= LowStar.Vector.size_of rv /\ LowStar.RVector.rv_elems_inv h rv i j}
-> Prims.GTot
(s:
FStar.Seq.Base.seq (Rgl?.repr rg)
{FStar.Seq.Base.length s = FStar.UInt32.v j - FStar.UInt32.v i}) | {
"end_col": 53,
"end_line": 524,
"start_col": 2,
"start_line": 524
} |
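as_seq_sub in the row above computes the representation of a slice of the vector. A natural whole-vector wrapper on top of it is sketched below; the actual file very likely defines an equivalent as_seq, but it is not shown in these rows, so the exact refinements here should be read as an assumption. The call is well-typed because rv_inv h rv implies rv_elems_inv h rv 0ul (V.size_of rv).

val as_seq:
  #a:Type0 -> #rst:Type -> #rg:regional rst a ->
  h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
  GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
  as_seq_sub h rv 0ul (V.size_of rv)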
FStar.Pervasives.Lemma | val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1) | val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j = | false | null | true | if i = j then () else rs_loc_elems_each_disj rg rs drid i (j - 1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_each_disj",
"FStar.Integers.op_Subtraction",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
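(* Illustrative sketch (not part of this module): for a `regional` instance
   over buffers of a statically known length `len`, the `copy` field would be
   a thin wrapper around `B.blit`, roughly
     let buffer_copy len src dst = B.blit src 0ul dst 0ul len
   packaged as `Cpy buffer_copy`. The names `buffer_copy` and `len` are
   hypothetical here; concrete regional/copyable instances for buffers are
   provided elsewhere (e.g. LowStar.Regional.Instances). *)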
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
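(* Note: `rv_inv` is just the conjunction of the three pieces above: every
   element satisfies its regional invariant (`elems_inv`), every element's
   region extends the vector's frame and element regions are pairwise
   disjoint (`elems_reg`), and the vector itself is live, freeable and sits
   in an eternal region (`rv_itself_inv`). The stateful operations below
   require and re-establish `rv_inv`. *)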
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
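(* A minimal sketch (hypothetical helper, not in the original module): the
   footprint of *all* elements of a sequence is `rs_loc_elems` over the full
   index range, e.g.
     let rs_loc_all (#a:Type0) (#rst:Type) (rg:regional rst a) (rs:S.seq a)
       : GTot loc
       = rs_loc_elems rg rs 0 (S.length rs)
   For a vector, `rv_loc_elems h rv 0ul (V.size_of rv)` plays the same role. *)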
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_each_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
drid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> FStar.Pervasives.Lemma
(requires
LowStar.Vector.forall_seq rs
i
j
(fun r -> FStar.Monotonic.HyperHeap.disjoint (Rgl?.region_of rg r) drid))
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rs_loc_elems rg rs i j)
(LowStar.Monotonic.Buffer.loc_all_regions_from false drid))
(decreases j) | {
"end_col": 50,
"end_line": 381,
"start_col": 2,
"start_line": 380
} |
FStar.Pervasives.Lemma | val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2) | val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 = | false | null | true | rs_loc_elems_disj rg
(V.as_seq h rv)
(V.frameOf rv)
(U32.v i)
(U32.v j)
(U32.v k1)
(U32.v k2)
(U32.v l1)
(U32.v l2) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_loc_elems_disj",
"LowStar.Vector.as_seq",
"LowStar.Vector.frameOf",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
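(* Note: `loc_all_exts_from false r` is the footprint of all regions that
   strictly extend `r` -- the modification set of `r` intersected with the
   complement of `{r}` -- so it excludes `r` itself. It is the enclosing
   footprint used by the inclusion lemmas that follow. *)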
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2))) | [] | LowStar.RVector.rv_loc_elems_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv} ->
k1: LowStar.Vector.uint32_t{i <= k1} ->
k2: LowStar.Vector.uint32_t{k1 <= k2 && k2 <= j} ->
l1: LowStar.Vector.uint32_t{i <= l1} ->
l2: LowStar.Vector.uint32_t{l1 <= l2 && l2 <= j}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rv_elems_reg h rv i j /\ k2 <= l1 || l2 <= k1)
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rv_loc_elems h rv k1 k2)
(LowStar.RVector.rv_loc_elems h rv l1 l2)) | {
"end_col": 67,
"end_line": 346,
"start_col": 2,
"start_line": 345
} |
FStar.HyperStack.ST.ST | val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid | val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid = | true | null | false | V.alloc_reserve len (rg_dummy rg) rid | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Greater",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"FStar.UInt32.__uint_to_t",
"FStar.HyperStack.ST.erid",
"LowStar.Vector.alloc_reserve",
"LowStar.Regional.rg_dummy",
"LowStar.Vector.vector",
"LowStar.RVector.rvector"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
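(* Usage note: thanks to the `SMTPat`s on `rv_inv_preserved`, client proofs
   rarely invoke it explicitly -- once `rv_inv h0 rv`, `loc_disjoint p
   (loc_rvector rv)` and `modifies p h0 h1` are in context, the solver
   concludes `rv_inv h1 rv` automatically. `as_seq_preserved` below gives the
   matching framing fact for the representation. *)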
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
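(* Informally, `as_seq h rv` is the sequence of high-level representations of
   the elements: by `as_seq_seq_index` below, for any `k < U32.v (V.size_of rv)`,
     S.index (as_seq h rv) k == Rgl?.r_repr rg h (S.index (V.as_seq h rv) k)
   i.e. each slot holds the `r_repr` of the corresponding element. *)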
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
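(* Note: representation preservation follows from disjointness: when the
   modified footprint `p` is disjoint from every element's region,
   `Rgl?.r_sep` shows each element's representation is unchanged, and the
   induction rebuilds equality of the whole `as_seq_seq`. *)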
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
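(* The constructors come in three flavours: `alloc_empty` returns a size-0
   vector without allocating, `alloc_rid` allocates and initializes `len`
   elements in fresh sub-regions of `rid`, and `alloc_reserve` reserves room
   for `len` elements but leaves the size at `0ul`. *)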
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
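// Added descriptive comment (not in the original source): `alloc_` fills the
// vector from the top down.  Each step creates a fresh sub-region of the
// vector's frame, allocates a fresh element into slot `cidx - 1ul`, and then
// recurses on the prefix [0, cidx - 1).  The `V.forall_ h1 rv 0ul cidx` clause
// in the postcondition is the loop invariant carried through this recursion.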
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
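// Added usage sketch (illustrative comment only; `rg` and `use_rv` below are
// assumptions, not definitions from this module):
//
//   let example (rid:HST.erid) : HST.St unit =
//     let rv = alloc_rid rg 8ul rid in   // 8 slots, all representing `Rgl?.irepr rg`
//     use_rv rv                          // rv_inv holds in the new state
//
// The vector would later be released with the module's own de-allocation
// operations, which are not shown in this excerpt.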
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1)) | [] | LowStar.RVector.alloc_reserve | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
len: LowStar.Vector.uint32_t{len > 0ul} ->
rid: FStar.HyperStack.ST.erid
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 39,
"end_line": 776,
"start_col": 2,
"start_line": 776
} |
FStar.Pervasives.Lemma | val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1)) | val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j = | false | null | true | if i = j
then ()
else
(Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.rs_elems_inv_live_region",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.Regional.__proj__Rgl__item__r_inv_reg",
"FStar.Seq.Base.index"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
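// Added illustrative comment (not part of the original source): for the buffer
// instance mentioned above, one would expect the `Cpy` payload to be a thin
// wrapper around `B.blit`, roughly
//
//   Cpy (fun len src dst -> B.blit src 0ul dst 0ul len)
//
// where the regional state `len` stands for the statically-known buffer
// length.  This sketch is a reading aid only; the actual instance lives in a
// separate module.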
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
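// Added descriptive comment (not in the original source): `rv_inv` bundles the
// three facts clients need about a live rvector -- every element satisfies its
// regional invariant (`elems_inv`), element regions extend the vector's frame
// and are pairwise disjoint (`elems_reg`), and the vector itself is live,
// freeable and sits in an eternal region (`rv_itself_inv`).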
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | [
"recursion"
] | LowStar.RVector.rs_elems_inv_live_region | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rs_elems_inv rg h rs i j)
(ensures
LowStar.Vector.forall_seq rs
i
j
(fun r -> FStar.Monotonic.HyperStack.live_region h (Rgl?.region_of rg r))) | {
"end_col": 50,
"end_line": 147,
"start_col": 2,
"start_line": 145
} |
FStar.Pervasives.Lemma | val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1) | val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 = | false | null | true | if i = j
then ()
else
(rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"Prims.op_Equality",
"Prims.bool",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"FStar.Seq.Base.index",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.RVector.rs_elems_inv_preserved"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
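// Added descriptive comment (not in the original source): `rs_loc_elems`
// unfolds by peeling the *last* element of the range, so this lemma provides
// the symmetric view -- peeling the *first* element -- which is the shape
// needed by inductions that walk the sequence front to back.  The proof
// re-associates `loc_union` using `loc_union_assoc` along the induction.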
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
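// Added descriptive comment (not in the original source): `loc_all_exts_from
// false r` covers every region in the modification set of `r` except `r`
// itself, i.e. (roughly) the strict descendants of `r`.  Element regions of an
// rvector extend its frame, so they fall inside this location while the frame
// region itself does not.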
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
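// Added descriptive comment (not in the original source): these disjointness
// lemmas build up in layers -- element vs. element (`rs_loc_elem_disj`), range
// vs. element (`rs_loc_elems_elem_disj`), and finally range vs. range
// (`rs_loc_elems_disj`); the range versions proceed by induction, peeling one
// element off the range at a time.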
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j) | [
"recursion"
] | LowStar.RVector.rs_elems_inv_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs} ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rs_elems_inv rg h0 rs i j /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.rs_loc_elems rg rs i j) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures LowStar.RVector.rs_elems_inv rg h1 rs i j)
(decreases j) | {
"end_col": 50,
"end_line": 427,
"start_col": 2,
"start_line": 425
} |
FStar.Pervasives.Lemma | val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1) | val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j = | false | null | true | if i = j then () else rs_loc_elems_parent_disj rg rs prid i (j - 1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_parent_disj",
"FStar.Integers.op_Subtraction",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_parent_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
prid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rs_elems_reg rg rs prid i j)
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rs_loc_elems rg rs i j)
(LowStar.Monotonic.Buffer.loc_region_only false prid))
(decreases j) | {
"end_col": 52,
"end_line": 358,
"start_col": 2,
"start_line": 357
} |
FStar.Pervasives.Lemma | val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1)) | val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j = | false | null | true | if i = j
then ()
else
(rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_included",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.RVector.rs_loc_elem_included"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
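// Note: `U32.v i+1` parses as `(U32.v i) + 1`, so this is the union of element
// locations over the singleton range [i, i+1), i.e. the location of element i alone.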
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_included | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
prid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rs_elems_reg rg rs prid i j)
(ensures
LowStar.Monotonic.Buffer.loc_includes (LowStar.RVector.loc_all_exts_from false prid)
(LowStar.RVector.rs_loc_elems rg rs i j))
(decreases j) | {
"end_col": 50,
"end_line": 263,
"start_col": 2,
"start_line": 261
} |
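Note on `rs_loc_elems_included` (the entry above): the lemma lifts the per-element fact `rs_loc_elem_included` to a whole range [i, j) by recursion on j, so every element location in the range is covered by `loc_all_exts_from false prid`. A minimal vector-level client might look like the sketch below; the helper name `example_rv_elems_included` is hypothetical, and the proof assumes the solver unfolds `rv_inv` and `rv_loc_elems` (extra hints may be needed otherwise).

(* Hypothetical client sketch -- not part of LowStar.RVector *)
let example_rv_elems_included (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (h:HS.mem) (rv:rvector rg{rv_inv h rv})
  : Lemma (loc_includes (loc_all_exts_from false (V.frameOf rv))
                        (rv_loc_elems h rv 0ul (V.size_of rv)))
  = rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv)
      (U32.v 0ul) (U32.v (V.size_of rv))

This is essentially the fact that the library's own `rv_loc_elems_included` (shown in the file context) provides at the vector level.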
FStar.Pervasives.Lemma | val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1 | val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 = | false | null | true | rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rv_elems_inv_preserved",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv)) | [] | LowStar.RVector.rv_inv_preserved_ | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rv_inv h0 rv /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.Vector.loc_vector rv) /\
LowStar.Monotonic.Buffer.loc_disjoint p
(LowStar.RVector.rv_loc_elems h0 rv 0ul (LowStar.Vector.size_of rv)) /\
LowStar.Monotonic.Buffer.modifies p h0 h1) (ensures LowStar.RVector.rv_inv h1 rv) | {
"end_col": 66,
"end_line": 451,
"start_col": 2,
"start_line": 451
} |
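Note on `rv_inv_preserved_` (the entry above): it is the basic framing lemma for `rvector` -- a modification disjoint from both the vector itself and from all element locations preserves `rv_inv`. A caller typically wants the hypothesis stated against `loc_rvector rv` alone; a hedged sketch of that corollary follows. The name `rv_inv_preserved_sketch` is hypothetical, the `assert` and auxiliary call are proof hints that may need adjusting, and the library itself may already provide an equivalent lemma.

(* Hypothetical corollary sketch -- not part of this entry *)
let rv_inv_preserved_sketch (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (rv:rvector rg) (p:loc) (h0 h1:HS.mem)
  : Lemma (requires (rv_inv h0 rv /\
                     loc_disjoint p (loc_rvector rv) /\
                     modifies p h0 h1))
          (ensures (rv_inv h1 rv))
  = // both disjointness hypotheses of rv_inv_preserved_ are meant to follow
    // from disjointness with loc_rvector rv, since the vector and all element
    // regions live under V.frameOf rv
    assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
    rv_loc_elems_included h0 rv 0ul (V.size_of rv);
    rv_inv_preserved_ rv p h0 h1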
FStar.Pervasives.Lemma | val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l) | val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l = | false | null | true | if k1 = k2
then ()
else
(rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"FStar.Integers.op_Less",
"Prims.op_BarBar",
"Prims.op_Equality",
"Prims.l_or",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_elem_disj",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.RVector.rs_loc_elem_disj"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_elem_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
prid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs} ->
k1: FStar.Integers.nat{i <= k1} ->
k2: FStar.Integers.nat{k1 <= k2 && k2 <= j} ->
l: FStar.Integers.nat{i <= l && l < j && (l < k1 || k2 <= l)}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rs_elems_reg rg rs prid i j)
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rs_loc_elems rg rs k1 k2)
(LowStar.RVector.rs_loc_elem rg rs l))
(decreases k2) | {
"end_col": 59,
"end_line": 314,
"start_col": 2,
"start_line": 312
} |
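Note on `rs_loc_elems_elem_disj` (the entry above): the union of element locations over [k1, k2) is disjoint from the location of any single element outside that range, which is what lets one element be modified without disturbing the others. A hypothetical vector-level instance is sketched below; the helper name is made up, and the proof assumes `rv_inv` and `rv_loc_elems` unfold for the solver.

(* Hypothetical instance sketch -- not part of LowStar.RVector *)
let example_prefix_disjoint (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (h:HS.mem) (rv:rvector rg{rv_inv h rv}) (i:uint32_t{i < V.size_of rv})
  : Lemma (loc_disjoint (rv_loc_elems h rv 0ul i)
                        (rs_loc_elem rg (V.as_seq h rv) (U32.v i)))
  = rs_loc_elems_elem_disj rg (V.as_seq h rv) (V.frameOf rv)
      (U32.v 0ul) (U32.v (V.size_of rv)) (U32.v 0ul) (U32.v i) (U32.v i)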
FStar.Pervasives.Lemma | val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2) | val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 = | false | null | true | if k1 = k2
then ()
else
(rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.l_or",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_disj",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.RVector.rs_loc_elems_elem_disj"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_disj | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
prid: FStar.Monotonic.HyperHeap.rid ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs} ->
k1: FStar.Integers.nat{i <= k1} ->
k2: FStar.Integers.nat{k1 <= k2 && k2 <= j} ->
l1: FStar.Integers.nat{i <= l1} ->
l2: FStar.Integers.nat{l1 <= l2 && l2 <= j}
-> FStar.Pervasives.Lemma
(requires LowStar.RVector.rs_elems_reg rg rs prid i j /\ k2 <= l1 || l2 <= k1)
(ensures
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.RVector.rs_loc_elems rg rs k1 k2)
(LowStar.RVector.rs_loc_elems rg rs l1 l2))
(decreases k2) | {
"end_col": 58,
"end_line": 331,
"start_col": 2,
"start_line": 329
} |
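Note on `rs_loc_elems_disj` (the entry above): it generalizes the element/range disjointness to two non-overlapping ranges of elements. Through the vector-level wrapper `rv_loc_elems_disj` from the file context, a typical prefix/suffix split looks like the hypothetical sketch below; the helper name is made up and the proof assumes `rv_inv` unfolds to `rv_elems_reg` for the solver.

(* Hypothetical instance sketch -- not part of LowStar.RVector *)
let example_split_disjoint (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (h:HS.mem) (rv:rvector rg{rv_inv h rv}) (i:uint32_t{i <= V.size_of rv})
  : Lemma (loc_disjoint (rv_loc_elems h rv 0ul i)
                        (rv_loc_elems h rv i (V.size_of rv)))
  = rv_loc_elems_disj h rv 0ul (V.size_of rv) 0ul i i (V.size_of rv)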
FStar.HyperStack.ST.ST | val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg))))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid | val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len = | true | null | false | let nrid = HST.new_region HS.root in
alloc_rid rg len nrid | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Greater",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"FStar.UInt32.__uint_to_t",
"LowStar.RVector.alloc_rid",
"LowStar.RVector.rvector",
"FStar.Monotonic.HyperHeap.rid",
"FStar.HyperStack.ST.new_region",
"FStar.Monotonic.HyperHeap.root"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
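// As an illustration only (the names below are hypothetical, not part of this
// library): for a regional instance over `B.buffer a` whose state is the
// buffer length `len`, the copy operation would be written along the lines of
// `Cpy (fun len src dst -> B.blit src 0ul dst 0ul len)`, with the class's
// postcondition discharged by the specification of `B.blit`.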
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with some additional conditions on the vector itself.

val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
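// Informally: `elems_inv` says every element satisfies its regional invariant,
// `elems_reg` says every element lives in a region extending the vector's
// frame and that these regions are pairwise disjoint, and `rv_itself_inv`
// says the vector's own buffer is live, freeable, and in an eternal region.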
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
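// `rs_loc_elems rg rs i j` is thus the union of the per-element footprints
// `rs_loc_elem rg rs k` for every k in [i, j), built by recursion on `j`.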
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
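// In other words, to carry `rv_inv` across a state change a client only has
// to show that the modified footprint `p` is disjoint from `loc_rvector rv`;
// the SMT patterns on the lemma above let it fire automatically in such proofs.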
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
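// `as_seq_seq rg h rs i j` is the ghost sequence obtained by mapping
// `Rgl?.r_repr rg h` over the slice rs[i..j), so its length is `j - i`.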
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
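// Together with `rv_inv_preserved`, this gives the standard framing principle:
// a modification disjoint from `loc_rvector rv` preserves both the invariant
// and the ghost representation `as_seq`.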
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
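// `alloc_` walks downwards from `cidx`: each iteration creates a fresh
// subregion of the vector's frame, allocates one element there with
// `rg_alloc`, stores it at index `cidx - 1`, and recurses on the prefix,
// re-establishing element invariants and pairwise region disjointness.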
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
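// A hedged usage sketch (hypothetical client code, not part of this module):
// given a regional instance `rg` and an eternal region `rid`, a caller might
// write `let rv = alloc_rid rg 8ul rid in ...`, after which `rv_inv` holds
// and `as_seq` of `rv` is eight copies of the initial representation
// `Rgl?.irepr rg`.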
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg))))) | [] | LowStar.RVector.alloc | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rg: LowStar.Regional.regional rst a -> len: LowStar.Vector.uint32_t{len > 0ul}
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 23,
"end_line": 793,
"start_col": 26,
"start_line": 791
} |
Prims.GTot | val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1))) | val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j = | false | null | false | if i = j
then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1)) (Rgl?.r_repr rg h (S.index rs (j - 1))) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial",
""
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.RVector.rs_elems_inv",
"Prims.op_Equality",
"FStar.Seq.Base.empty",
"LowStar.Regional.__proj__Rgl__item__repr",
"Prims.bool",
"FStar.Seq.Properties.snoc",
"LowStar.RVector.as_seq_seq",
"FStar.Integers.op_Subtraction",
"LowStar.Regional.__proj__Rgl__item__r_repr",
"FStar.Seq.Base.index",
"Prims.int"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with some additional conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
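// Two separate conditions: every element's region extends the parent `prid`,
// and any two distinct elements occupy disjoint regions. The disjointness is
// what makes modifications to one element invisible to all the others.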
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
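// Note on parsing: `U32.v i+1` is `(U32.v i) + 1`, since application binds
// tighter than `+`; so this is the footprint of the single element at index `i`.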
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
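// Locality in action: if only the regions under the i-th element are modified
// and that element's invariant is re-established, the invariant of the whole
// vector survives; the prefix and suffix are untouched thanks to disjointness.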
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j) | [
"recursion"
] | LowStar.RVector.as_seq_seq | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j:
FStar.Integers.nat
{i <= j /\ j <= FStar.Seq.Base.length rs /\ LowStar.RVector.rs_elems_inv rg h rs i j}
-> Prims.GTot (s: FStar.Seq.Base.seq (Rgl?.repr rg) {FStar.Seq.Base.length s = j - i}) | {
"end_col": 53,
"end_line": 511,
"start_col": 2,
"start_line": 509
} |
FStar.Pervasives.Lemma | val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)] | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k | val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k = | false | null | true | if i = j then () else if k = j - i - 1 then () else as_seq_seq_index rg h rs i (j - 1) k | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.RVector.rs_elems_inv",
"FStar.Integers.op_Less",
"FStar.Integers.op_Subtraction",
"Prims.op_Equality",
"Prims.bool",
"FStar.Integers.int_t",
"LowStar.RVector.as_seq_seq_index",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with some additional conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
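// In words: every element satisfies its regional invariant (`elems_inv`), the
// element regions extend the vector's frame and are pairwise disjoint
// (`elems_reg`), and the vector itself is live, freeable, and allocated in an
// eternal region (`rv_itself_inv`).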
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
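// For instance, unfolding the recursion over a two-element range gives
//   rs_loc_elems rg rs 0 2
//   == loc_union (loc_union loc_none (rs_loc_elem rg rs 0))
//                (rs_loc_elem rg rs 1)
// i.e. the union of the per-element locations in the range [0, 2).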
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
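// Note: `U32.v i+1` parses as `(U32.v i) + 1` (application binds tighter than
// `+`), so this is `rs_loc_elems` over the singleton range [i, i+1), i.e. the
// location of element `i` (up to a union with `loc_none`).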
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
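// Roughly: the locations of all regions transitively below `r`, excluding `r`
// itself -- like `loc_all_regions_from false r` with `r`'s own region removed.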
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
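// For example, for a two-element range the recursion unfolds to
//   as_seq_seq rg h rs 0 2
//   == S.snoc (S.snoc S.empty (Rgl?.r_repr rg h (S.index rs 0)))
//             (Rgl?.r_repr rg h (S.index rs 1))
// i.e. the sequence of element representations, in order.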
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)] | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)] | [
"recursion"
] | LowStar.RVector.as_seq_seq_index | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j:
FStar.Integers.nat
{i <= j /\ j <= FStar.Seq.Base.length rs /\ LowStar.RVector.rs_elems_inv rg h rs i j} ->
k: FStar.Integers.nat{k < j - i}
-> FStar.Pervasives.Lemma (requires true)
(ensures
FStar.Seq.Base.index (LowStar.RVector.as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (FStar.Seq.Base.index rs (i + k)))
(decreases j)
[SMTPat (FStar.Seq.Base.index (LowStar.RVector.as_seq_seq rg h rs i j) k)] | {
"end_col": 43,
"end_line": 555,
"start_col": 2,
"start_line": 553
} |
FStar.HyperStack.ST.ST | val free:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
HST.ST unit
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 _ h1 -> modifies (loc_rvector rv) h0 h1)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let free #a #rst #rg rv =
let hh0 = HST.get () in
(if V.size_of rv = 0ul then ()
else free_elems rv (V.size_of rv - 1ul));
let hh1 = HST.get () in
rv_loc_elems_included hh0 rv 0ul (V.size_of rv);
V.free rv | val free:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
HST.ST unit
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 _ h1 -> modifies (loc_rvector rv) h0 h1))
let free #a #rst #rg rv = | true | null | false | let hh0 = HST.get () in
(if V.size_of rv = 0ul then () else free_elems rv (V.size_of rv - 1ul));
let hh1 = HST.get () in
rv_loc_elems_included hh0 rv 0ul (V.size_of rv);
V.free rv | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.free",
"Prims.unit",
"LowStar.RVector.rv_loc_elems_included",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"Prims.op_Equality",
"FStar.UInt32.t",
"Prims.bool",
"LowStar.RVector.free_elems",
"FStar.Integers.op_Subtraction",
"FStar.Integers.Unsigned",
"FStar.Integers.W32"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`, which combines the invariant
// of each element with additional conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
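// Hypothetical usage sketch: `let rv = alloc rg 8ul in ...` allocates a fresh
// region under the root and returns a vector of 8 elements whose abstract
// sequence is `S.create 8 (Ghost.reveal (Rgl?.irepr rg))`, per the
// postcondition above.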
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
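// Note the precondition: the inserted element must live in a region that
// extends the vector's frame and is disjoint from every existing element's
// region. `insert_copy` below discharges this by allocating a fresh
// sub-region with `HST.new_region (V.frameOf rv)` and copying into it first.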
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k)
val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1))
let rec free_elems #a #rst #rg rv idx =
let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v idx + 1) 0 (U32.v idx) (U32.v idx);
rv_elems_inv_preserved
rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then
free_elems rv (idx - 1ul)
val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv)))))
#reset-options "--z3rlimit 40"
let flush #a #rst #rg rv i =
let hh0 = HST.get () in
(if i = 0ul then () else free_elems rv (i - 1ul));
rv_loc_elems_included hh0 rv 0ul i;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) hh0 hh1);
let frv = V.flush rv (rg_dummy rg) i in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv))
0 (U32.v i) (U32.v i) (U32.v (V.size_of rv));
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v i) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(U32.v i) (U32.v (V.size_of rv));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv)
(U32.v i) (U32.v (V.size_of rv)) 0 (U32.v (V.size_of frv));
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv)
0 (U32.v (V.size_of frv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq hh2 frv));
frv
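// Per the postcondition, `flush rv i` frees the regions of the first `i`
// elements and returns a vector representing
// `S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv))`; e.g., assuming
// `rv` holds at least two elements,
//   let frv = flush rv 2ul in  (* drops elements 0 and 1 *)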
val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv idx (V.size_of rv)) h0 h1))
let rec free_elems_from #a #rst #rg rv idx =
let hh0 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v idx) (U32.v (V.size_of rv))
(U32.v idx+1) (U32.v (V.size_of rv))
(U32.v idx);
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rv_elems_inv_preserved
rv (idx+1ul) (V.size_of rv)
(rv_loc_elem hh0 rv idx) hh0 hh1;
if idx + 1ul < V.size_of rv then
begin
free_elems_from rv (idx + 1ul);
rs_loc_elems_rec_inverse rg (V.as_seq hh0 rv) (U32.v idx) (U32.v (V.size_of rv))
end
val shrink:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> new_size:uint32_t{new_size <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = new_size /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) 0 (U32.v new_size))))
#reset-options "--z3rlimit 40"
let shrink #a #rst #rg rv new_size =
let size = V.size_of rv in
[@@inline_let] let sz = U32.v size in
[@@inline_let] let nsz = U32.v new_size in
let hh0 = HST.get () in
if new_size >= size then rv else
begin
free_elems_from rv new_size;
rv_loc_elems_included hh0 rv new_size size;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz) hh0 hh1);
let frv = V.shrink rv new_size in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 sz
0 nsz nsz sz;
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 nsz;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz)
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) 0 nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz)
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 sz 0 nsz;
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz)
(as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv) 0 nsz 0 nsz;
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv) 0 nsz)
(as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz)
(as_seq hh2 frv));
frv
end
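(* Hedged illustration, not part of the original LowStar.RVector.fst: a thin
   wrapper over `shrink` that drops every element.  The name `shrink_to_zero`
   and its deliberately weak specification are assumptions introduced only to
   sketch how a caller discharges `shrink`'s precondition; `0ul` trivially
   satisfies `new_size <= V.size_of rv`. *)
let shrink_to_zero (#a:Type0) (#rst:Type) (#rg:regional rst a) (rv:rvector rg)
: HST.ST (rvector rg)
  (requires (fun h0 -> rv_inv h0 rv))
  (ensures (fun h0 frv h1 -> rv_inv h1 frv /\ V.size_of frv = 0ul))
= shrink rv 0ul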
val free:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
HST.ST unit
(requires (fun h0 -> rv_inv h0 rv)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 40,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val free:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
HST.ST unit
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 _ h1 -> modifies (loc_rvector rv) h0 h1)) | [] | LowStar.RVector.free | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 11,
"end_line": 1184,
"start_col": 25,
"start_line": 1178
} |
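A hedged usage sketch for the `free` row above (not part of the dataset): since `free` only promises `modifies (loc_rvector rv)`, any other rvector whose regions are disjoint from that location keeps its invariant via the framing lemma `rv_inv_preserved` from the same file. The wrapper name `frame_rv_inv` is an assumption added purely for illustration.

let frame_rv_inv (#a:Type0) (#rst:Type) (#rg:regional rst a)
  (rv:rvector rg) (p:loc) (h0 h1:HS.mem)
: Lemma (requires (rv_inv h0 rv /\ loc_disjoint p (loc_rvector rv) /\ modifies p h0 h1))
        (ensures (rv_inv h1 rv))
= rv_inv_preserved rv p h0 h1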
FStar.Pervasives.Lemma | val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1))) | val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j = | false | null | true | if i + 1 = j
then ()
else
(assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1)) (rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1)) (rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union (rs_loc_elem rg rs i) (rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1))) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Integers.op_Less_Equals",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"FStar.Integers.int_t",
"FStar.Integers.op_Plus",
"Prims.bool",
"LowStar.Monotonic.Buffer.loc_union_assoc",
"LowStar.RVector.rs_loc_elem",
"LowStar.RVector.rs_loc_elems",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"LowStar.Monotonic.Buffer.loc",
"LowStar.Monotonic.Buffer.loc_union",
"LowStar.RVector.rs_loc_elems_rec_inverse"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_rec_inverse | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i < j && j <= FStar.Seq.Base.length rs}
-> FStar.Pervasives.Lemma (requires true)
(ensures
LowStar.RVector.rs_loc_elems rg rs i j ==
LowStar.Monotonic.Buffer.loc_union (LowStar.RVector.rs_loc_elem rg rs i)
(LowStar.RVector.rs_loc_elems rg rs (i + 1) j))
(decreases j) | {
"end_col": 51,
"end_line": 221,
"start_col": 2,
"start_line": 206
} |
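A hedged usage sketch for the `rs_loc_elems_rec_inverse` row above (not part of the dataset): clients invoke the lemma when a proof needs the head-first decomposition of `rs_loc_elems` rather than its tail-first definition. The wrapper name `peel_head_loc` is an assumption added purely for illustration.

let peel_head_loc (#a:Type0) (#rst:Type) (rg:regional rst a)
  (rs:S.seq a) (i:nat) (j:nat{i < j && j <= S.length rs})
: Lemma (rs_loc_elems rg rs i j ==
         loc_union (rs_loc_elem rg rs i) (rs_loc_elems rg rs (i + 1) j))
= rs_loc_elems_rec_inverse rg rs i j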
FStar.HyperStack.ST.ST | val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg))))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec | val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid = | true | null | false | let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Greater",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"FStar.UInt32.__uint_to_t",
"FStar.HyperStack.ST.erid",
"Prims.unit",
"LowStar.Vector.loc_vector_within_included",
"LowStar.RVector.rvector",
"LowStar.RVector.alloc_",
"LowStar.Vector.vector",
"LowStar.Vector.alloc_rid",
"LowStar.Regional.rg_dummy"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg))))) | [] | LowStar.RVector.alloc_rid | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
len: LowStar.Vector.uint32_t{len > 0ul} ->
rid: FStar.HyperStack.ST.erid
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 5,
"end_line": 759,
"start_col": 34,
"start_line": 755
} |
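A hedged usage sketch for the `alloc_rid` row above (not part of the dataset): callers typically pass a freshly created child region, mirroring the `HST.new_region` pattern that `alloc_` itself uses internally. The wrapper name `alloc_in_child` and the fixed length `8ul` are assumptions added purely for illustration.

let alloc_in_child (#a:Type0) (#rst:Type) (rg:regional rst a) (parent:HST.erid)
: HST.ST (rvector rg)
  (requires (fun _ -> True))
  (ensures (fun _ rv h1 -> rv_inv h1 rv /\ V.size_of rv = 8ul))
= let nrid = HST.new_region parent in
  alloc_rid rg 8ul nrid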
FStar.Pervasives.Lemma | val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1) | val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 = | false | null | true | if i = j
then ()
else
(V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma",
""
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.rv_loc_elems_preserved",
"FStar.Integers.op_Subtraction",
"FStar.UInt32.__uint_to_t",
"Prims.unit",
"LowStar.Vector.loc_vector_within_includes",
"Prims._assert",
"Prims.eq2",
"LowStar.Vector.get",
"LowStar.Vector.get_preserved"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j)) | [
"recursion"
] | LowStar.RVector.rv_loc_elems_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv} ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.Vector.live h0 rv /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.Vector.loc_vector_within rv i j) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures LowStar.RVector.rv_loc_elems h0 rv i j == LowStar.RVector.rv_loc_elems h1 rv i j)
(decreases FStar.UInt32.v j) | {
"end_col": 53,
"end_line": 413,
"start_col": 2,
"start_line": 408
} |
FStar.Pervasives.Lemma | val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1) | val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 = | false | null | true | if i = j
then ()
else
(rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"Prims.op_Equality",
"Prims.bool",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"FStar.Seq.Base.index",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.RVector.rs_elems_inv_preserved"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with additional conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
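// Spelled out: every element satisfies its regional invariant, all element
// regions are pairwise-disjoint subregions of the vector's frame, and the
// vector itself is live, freeable, and allocated in an eternal region.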
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
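// Unfolding the recursion, e.g. `rs_loc_elems rg rs 0 3` is
// `loc_union (loc_union (loc_union loc_none (rs_loc_elem rg rs 0))
//                       (rs_loc_elem rg rs 1))
//            (rs_loc_elem rg rs 2)`,
// i.e. the union of the footprints of elements 0, 1 and 2.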
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
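// That is, `as_seq_seq rg h rs i j` is the sequence of high-level
// representations of `rs.[i] .. rs.[j-1]`, in order, built by repeatedly
// snoc-ing the representation of the last element.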
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j))) | [
"recursion"
] | LowStar.RVector.as_seq_seq_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs} ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rs_elems_inv rg h0 rs i j /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.rs_loc_elems rg rs i j) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures
([@@ FStar.Pervasives.inline_let ]let _ =
LowStar.RVector.rs_elems_inv_preserved rg rs i j p h0 h1
in
FStar.Seq.Base.equal (LowStar.RVector.as_seq_seq rg h0 rs i j)
(LowStar.RVector.as_seq_seq rg h1 rs i j))) | {
"end_col": 50,
"end_line": 632,
"start_col": 2,
"start_line": 629
} |
FStar.Pervasives.Lemma | val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1 | val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 = | false | null | true | rs_loc_elems_elem_disj rg
(V.as_seq h0 rv)
(V.frameOf rv)
0
(U32.v (V.size_of rv))
0
(U32.v i)
(U32.v i);
rs_elems_inv_preserved rg
(V.as_seq h0 rv)
0
(U32.v i)
(loc_all_regions_from false (Rgl?.region_of rg (V.get h1 rv i)))
h0
h1;
rs_loc_elems_elem_disj rg
(V.as_seq h0 rv)
(V.frameOf rv)
0
(U32.v (V.size_of rv))
(U32.v i + 1)
(U32.v (V.size_of rv))
(U32.v i);
rs_elems_inv_preserved rg
(V.as_seq h0 rv)
(U32.v i + 1)
(U32.v (V.size_of rv))
(loc_all_regions_from false (Rgl?.region_of rg (V.get h1 rv i)))
h0
h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rs_elems_inv_preserved",
"LowStar.Vector.as_seq",
"FStar.Integers.op_Plus",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.UInt32.v",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Regional.__proj__Rgl__item__region_of",
"LowStar.Vector.get",
"Prims.unit",
"LowStar.RVector.rs_loc_elems_elem_disj",
"LowStar.Vector.frameOf"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with additional conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
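// Thanks to the SMT patterns on the declaration above, this lemma is applied
// automatically whenever `rv_inv h0 rv`, `loc_disjoint p (loc_rvector rv)` and
// `modifies p h0 h1` all appear in the proof context, so clients rarely need
// to invoke it explicitly.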
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv)) | [] | LowStar.RVector.rv_inv_preserved_int | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t{i < LowStar.Vector.size_of rv} ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rv_inv h0 rv /\
LowStar.Monotonic.Buffer.modifies (LowStar.Monotonic.Buffer.loc_all_regions_from false
(Rgl?.region_of rg (LowStar.Vector.get h0 rv i)))
h0
h1 /\ LowStar.Regional.rg_inv rg h1 (LowStar.Vector.get h1 rv i))
(ensures LowStar.RVector.rv_inv h1 rv) | {
"end_col": 9,
"end_line": 496,
"start_col": 2,
"start_line": 480
} |
FStar.Pervasives.Lemma | val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j) | val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j = | false | null | true | rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_elems_inv_live_region",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the regional
// invariant of each element with some conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r)))) | [] | LowStar.RVector.rv_elems_inv_live_region | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> FStar.Pervasives.Lemma (requires LowStar.RVector.rv_elems_inv h rv i j)
(ensures
LowStar.Vector.forall_ h
rv
i
j
(fun r -> FStar.Monotonic.HyperStack.live_region h (Rgl?.region_of rg r))) | {
"end_col": 67,
"end_line": 157,
"start_col": 2,
"start_line": 157
} |
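A small consumer sketch for rv_elems_inv_live_region, assuming the same module context; the name `elem_regions_live` is hypothetical and the proof relies on `rv_inv` unfolding to the element-wise invariant over the full range. From the full vector invariant one obtains liveness of every element's region.

// Hedged sketch, not part of the record above.
val elem_regions_live:
  #a:Type0 -> #rst:Type -> #rg:regional rst a ->
  h:HS.mem -> rv:rvector rg ->
  Lemma (requires (rv_inv h rv))
        (ensures (V.forall_ h rv 0ul (V.size_of rv)
                   (fun r -> HS.live_region h (Rgl?.region_of rg r))))
let elem_regions_live #a #rst #rg h rv =
  rv_elems_inv_live_region h rv 0ul (V.size_of rv)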
Prims.GTot | val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j = | false | null | false | rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.rs_elems_reg",
"LowStar.Vector.as_seq",
"LowStar.Vector.frameOf",
"FStar.UInt32.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the regional
// invariant of each element with some conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0 | [] | LowStar.RVector.rv_elems_reg | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
h: FStar.Monotonic.HyperStack.mem ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv}
-> Prims.GTot Type0 | {
"end_col": 68,
"end_line": 112,
"start_col": 2,
"start_line": 112
} |
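A sketch of one consequence of the rv_elems_reg discipline, assuming the surrounding module is in scope; the name `elem_footprint_in_frame` is hypothetical and it simply instantiates `rv_loc_elems_included` (defined further down in the file) over the whole vector: when every element's region extends the vector's frame, the combined element footprint stays inside the extensions of that frame.

// Hedged sketch, not part of the record above.
val elem_footprint_in_frame:
  #a:Type0 -> #rst:Type -> #rg:regional rst a ->
  h:HS.mem -> rv:rvector rg ->
  Lemma (requires (rv_elems_reg h rv 0ul (V.size_of rv)))
        (ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
                               (rv_loc_elems h rv 0ul (V.size_of rv))))
let elem_footprint_in_frame #a #rst #rg h rv =
  rv_loc_elems_included h rv 0ul (V.size_of rv)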
FStar.HyperStack.ST.ST | val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec free_elems #a #rst #rg rv idx =
let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v idx + 1) 0 (U32.v idx) (U32.v idx);
rv_elems_inv_preserved
rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then
free_elems rv (idx - 1ul) | val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1))
let rec free_elems #a #rst #rg rv idx = | true | null | false | let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj rg
(V.as_seq hh0 rv)
(V.frameOf rv)
0
(U32.v idx + 1)
0
(U32.v idx)
(U32.v idx);
rv_elems_inv_preserved rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then free_elems rv (idx - 1ul) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"Prims.op_disEquality",
"FStar.UInt32.t",
"FStar.UInt32.__uint_to_t",
"LowStar.RVector.free_elems",
"FStar.Integers.op_Subtraction",
"Prims.unit",
"Prims.bool",
"LowStar.RVector.rv_elems_inv_preserved",
"LowStar.RVector.rs_loc_elem",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"LowStar.RVector.rs_loc_elems_elem_disj",
"LowStar.Vector.frameOf",
"FStar.Integers.op_Plus",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Regional.rg_free",
"LowStar.Vector.index"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the regional
// invariant of each element with some conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k)
val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 -> | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1)) | [
"recursion"
] | LowStar.RVector.free_elems | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> idx: LowStar.Vector.uint32_t{idx < LowStar.Vector.size_of rv}
-> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 29,
"end_line": 1017,
"start_col": 39,
"start_line": 1005
} |
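A hedged caller sketch for free_elems, assuming the module context above; the name `free_all_elems` is hypothetical (the library's own rvector `free` follows a similar pattern before freeing the vector's cells). Starting from the last index releases every element of a non-empty vector.

// Hedged sketch, not part of the record above.
val free_all_elems:
  #a:Type0 -> #rst:Type -> #rg:regional rst a ->
  rv:rvector rg{V.size_of rv > 0ul} ->
  HST.ST unit
    (requires (fun h0 -> rv_inv h0 rv))
    (ensures (fun h0 _ h1 ->
      modifies (rv_loc_elems h0 rv 0ul (V.size_of rv)) h0 h1))
let free_all_elems #a #rst #rg rv =
  free_elems rv (V.size_of rv - 1ul)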
FStar.Pervasives.Lemma | val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a)) | val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l = | false | null | true | assert (forall (a: nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a == Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a: nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a == Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a: nat{a < j - i}). S.index (S.slice rs1 i j) a == S.index (S.slice rs2 k l) a);
assert (forall (a: nat{a < j - i}). S.index rs1 (i + a) == S.index rs2 (k + a)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.RVector.rs_elems_inv",
"Prims._assert",
"Prims.l_Forall",
"FStar.Integers.op_Less",
"FStar.Integers.op_Subtraction",
"Prims.eq2",
"FStar.Seq.Base.index",
"FStar.Integers.op_Plus",
"Prims.unit",
"FStar.Seq.Base.slice",
"Prims.op_Equality",
"Prims.int",
"LowStar.Regional.__proj__Rgl__item__repr",
"LowStar.RVector.as_seq_seq",
"LowStar.Regional.__proj__Rgl__item__r_repr"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
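// Editor's sketch (not part of LowStar.RVector.fst): the shape of a
// `copyable` instance. Assuming a regional instance over `B.buffer a` whose
// state records the buffer length `len`, the copy operator can be built from
// `B.blit`, roughly:
//
//   let buffer_copyable = Cpy (fun len src dst -> B.blit src 0ul dst 0ul len)
//
// The actual instance for buffers lives elsewhere in the library; the line
// above is only an illustration of how `Cpy` is instantiated.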
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
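// Editor's note (not part of the original file): unfolding the recursion
// above for a two-element range gives
//   rs_loc_elems rg rs 0 2
//   == loc_union (rs_loc_elems rg rs 0 1) (rs_loc_elem rg rs 1)
//   == loc_union (loc_union loc_none (rs_loc_elem rg rs 0)) (rs_loc_elem rg rs 1)
// i.e. the union of the per-element footprints for indices 0 and 1.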
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
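// Editor's note (not part of the original file): the set above is
// `mod_set {r}` with `{r}` itself removed, i.e. (roughly) every region that
// extends `r`, at any depth, excluding `r` -- hence the name
// "all extensions from r".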
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
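// Editor's note (not part of the original file): thanks to the SMTPat triple
// above, callers rarely invoke this lemma explicitly; once `rv_inv h0 rv`,
// `loc_disjoint p (loc_rvector rv)` and `modifies p h0 h1` are in the SMT
// context, Z3 applies it automatically to conclude `rv_inv h1 rv`.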
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
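// Editor's note (not part of the original file): the proof above splits the
// vector's elements into those before index `i` and those after it; both
// groups live in regions disjoint from element `i`'s region, so their
// invariants survive via `rs_elems_inv_preserved`, while element `i` itself
// is covered by the hypothesis `rg_inv rg h1 (V.get h1 rv i)`.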
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
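// Editor's note (not part of the original file): unfolding the recursion,
//   as_seq_seq rg h rs i i == S.empty
//   as_seq_seq rg h rs 0 2 == S.snoc (S.snoc S.empty (Rgl?.r_repr rg h (S.index rs 0)))
//                                    (Rgl?.r_repr rg h (S.index rs 1))
// so entry k of the result is the representation of `S.index rs (i + k)`,
// exactly as the lemma `as_seq_seq_index` below states.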
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l))) | [] | LowStar.RVector.as_seq_seq_eq | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs1: FStar.Seq.Base.seq a ->
rs2: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j:
FStar.Integers.nat
{i <= j /\ j <= FStar.Seq.Base.length rs1 /\ LowStar.RVector.rs_elems_inv rg h rs1 i j} ->
k: FStar.Integers.nat ->
l:
FStar.Integers.nat
{k <= l /\ l <= FStar.Seq.Base.length rs2 /\ LowStar.RVector.rs_elems_inv rg h rs2 k l}
-> FStar.Pervasives.Lemma
(requires FStar.Seq.Base.equal (FStar.Seq.Base.slice rs1 i j) (FStar.Seq.Base.slice rs2 k l))
(ensures
FStar.Seq.Base.equal (LowStar.RVector.as_seq_seq rg h rs1 i j)
(LowStar.RVector.as_seq_seq rg h rs2 k l)) | {
"end_col": 54,
"end_line": 580,
"start_col": 2,
"start_line": 568
} |
Prims.GTot | val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)) | val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j = | false | null | false | if i = j then loc_none else loc_union (rs_loc_elems rg rs i (j - 1)) (rs_loc_elem rg rs (j - 1)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial",
""
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"LowStar.Monotonic.Buffer.loc_none",
"Prims.bool",
"LowStar.Monotonic.Buffer.loc_union",
"LowStar.RVector.rs_loc_elems",
"FStar.Integers.op_Subtraction",
"LowStar.RVector.rs_loc_elem",
"LowStar.Monotonic.Buffer.loc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
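// Editor's note (not part of the original file): `rs_loc_elem rg rs i` is the
// footprint of the single element `S.index rs i` -- its region plus every
// region allocated below it. The `rs_loc_elems` function declared next takes
// the union of these footprints over the index range [i, j).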
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j) | [
"recursion"
] | LowStar.RVector.rs_loc_elems | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs}
-> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 44,
"end_line": 175,
"start_col": 2,
"start_line": 173
} |
FStar.Pervasives.Lemma | val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v | val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v = | false | null | true | if i = j then () else if k = j - 1 then () else as_seq_seq_upd rg h rs i (j - 1) k v | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.RVector.rs_elems_inv",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less",
"LowStar.Regional.rg_inv",
"Prims.op_Equality",
"Prims.bool",
"FStar.Integers.int_t",
"FStar.Integers.op_Subtraction",
"LowStar.RVector.as_seq_seq_upd",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
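// Editor's note (not part of the original file): instantiating the lemma
// above with k = 0 and l = j - i gives
//   S.slice (as_seq_seq rg h rs i j) 0 (j - i) `S.equal`
//   as_seq_seq rg h (S.slice rs i j) 0 (j - i)
// i.e. taking per-element representations commutes with slicing the
// underlying sequence of regional values.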
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v))) | [
"recursion"
] | LowStar.RVector.as_seq_seq_upd | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j:
FStar.Integers.nat
{i <= j /\ j <= FStar.Seq.Base.length rs /\ LowStar.RVector.rs_elems_inv rg h rs i j} ->
k: FStar.Integers.nat{i <= k && k < j} ->
v: a{LowStar.Regional.rg_inv rg h v}
-> FStar.Pervasives.Lemma
(ensures
FStar.Seq.Base.equal (LowStar.RVector.as_seq_seq rg h (FStar.Seq.Base.upd rs k v) i j)
(FStar.Seq.Base.upd (LowStar.RVector.as_seq_seq rg h rs i j) (k - i) (Rgl?.r_repr rg h v))) | {
"end_col": 43,
"end_line": 614,
"start_col": 2,
"start_line": 612
} |
Prims.GTot | val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)}) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv) | val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv = | false | null | false | as_seq_sub h rv 0ul (V.size_of rv) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"sometrivial"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.rvector",
"LowStar.RVector.rv_inv",
"LowStar.RVector.as_seq_sub",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"FStar.Seq.Base.seq",
"LowStar.Regional.__proj__Rgl__item__repr",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.l_or",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.size",
"FStar.UInt32.n",
"FStar.Seq.Base.length",
"FStar.UInt32.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
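// For illustration only (the regional instance and the names below are assumed,
// not defined in this file): a `Cpy` instance for a buffer-based regional whose
// state is the buffer length would simply wrap `B.blit`:
//
//   let buf_copy (len:UInt32.t) (src dst:B.buffer t) = B.blit src 0ul dst 0ul len
//   let buf_cpy = Cpy buf_copy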
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
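// Unfolded, `rv_inv h rv` states that (1) every element in [0, size_of rv)
// satisfies its regional invariant in `h`, (2) each element's region extends
// `V.frameOf rv` and the element regions are pairwise disjoint, and (3) the
// vector itself is live, freeable, and allocated in an eternal region.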
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
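// For example, unfolding the recursion for a three-element prefix gives
//   rs_loc_elems rg rs 0 3 ==
//   loc_union (loc_union (loc_union loc_none (rs_loc_elem rg rs 0))
//                        (rs_loc_elem rg rs 1))
//             (rs_loc_elem rg rs 2)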
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
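// Note that the SMT patterns above let this framing lemma fire automatically:
// whenever a context establishes `rv_inv h0 rv`, `loc_disjoint p (loc_rvector rv)`
// and `modifies p h0 h1`, the solver can derive `rv_inv h1 rv` without an
// explicit lemma call.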
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
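// `as_seq_seq rg h rs i j` is the pointwise representation of the slice
// rs.[i, j): its k-th entry is `Rgl?.r_repr rg h (S.index rs (i + k))`.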
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)}) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)}) | [] | LowStar.RVector.as_seq | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | h: FStar.Monotonic.HyperStack.mem -> rv: LowStar.RVector.rvector rg {LowStar.RVector.rv_inv h rv}
-> Prims.GTot
(s:
FStar.Seq.Base.seq (Rgl?.repr rg)
{FStar.Seq.Base.length s = FStar.UInt32.v (LowStar.Vector.size_of rv)}) | {
"end_col": 36,
"end_line": 531,
"start_col": 2,
"start_line": 531
} |
FStar.Pervasives.Lemma | val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k | val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k = | false | null | true | if k = j - 1 then () else rs_loc_elems_includes #a #rst rg rs i (j - 1) k | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"FStar.Integers.op_Less",
"Prims.op_Equality",
"FStar.Integers.int_t",
"FStar.Integers.op_Subtraction",
"Prims.bool",
"LowStar.RVector.rs_loc_elems_includes",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
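// Since function application binds tighter than `+`, the range above is
// [U32.v i, U32.v i + 1), so `rv_loc_elem h rv i` reduces to
// `loc_union loc_none (rs_loc_elem rg (V.as_seq h rv) (U32.v i))`, i.e. the
// region subtree of the i-th element alone.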
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k)) | [
"recursion"
] | LowStar.RVector.rs_loc_elems_includes | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j: FStar.Integers.nat{i <= j && j <= FStar.Seq.Base.length rs} ->
k: FStar.Integers.nat{i <= k && k < j}
-> FStar.Pervasives.Lemma
(ensures
LowStar.Monotonic.Buffer.loc_includes (LowStar.RVector.rs_loc_elems rg rs i j)
(LowStar.RVector.rs_loc_elem rg rs k)) | {
"end_col": 54,
"end_line": 232,
"start_col": 2,
"start_line": 231
} |
FStar.HyperStack.ST.ST | val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k) | val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v = | true | null | false | let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
forall_intro (move_requires (rs_loc_elem_disj rg
(V.as_seq hh0 rv)
(V.frameOf rv)
0
(U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k: nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k) (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
);
forall_intro (move_requires (r_sep_forall rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) hh0 hh1));
assert (forall (k: nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k) (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k: nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k: nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv) 0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))) k) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.copyable",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"Prims._assert",
"Prims.l_Forall",
"FStar.Integers.nat",
"Prims.op_AmpAmp",
"Prims.op_disEquality",
"Prims.int",
"Prims.l_or",
"FStar.Integers.op_Greater_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.UInt.size",
"FStar.UInt32.n",
"FStar.UInt32.v",
"Prims.eq2",
"LowStar.Regional.__proj__Rgl__item__repr",
"FStar.Seq.Base.index",
"LowStar.RVector.as_seq_seq",
"LowStar.Vector.as_seq",
"Prims.unit",
"LowStar.Regional.__proj__Rgl__item__r_repr",
"Prims.l_imp",
"LowStar.Monotonic.Buffer.loc_disjoint",
"LowStar.RVector.rs_loc_elem",
"FStar.Classical.forall_intro",
"LowStar.Regional.rg_inv",
"Prims.l_and",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Regional.__proj__Rgl__item__region_of",
"LowStar.Monotonic.Buffer.modifies",
"FStar.Classical.move_requires",
"LowStar.RVector.r_sep_forall",
"FStar.Integers.op_Less_Equals",
"LowStar.RVector.rs_elems_reg",
"LowStar.Vector.frameOf",
"LowStar.RVector.rs_loc_elem_disj",
"LowStar.RVector.rv_inv_preserved_int",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.RVector.__proj__Cpy__item__copy",
"LowStar.Regional.__proj__Rgl__item__state",
"LowStar.Vector.index"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
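// In words: `rs_loc_elems` is defined by peeling the last element off the
// range, and this lemma shows the same location can equally be obtained by
// peeling the first element, which is the shape needed for inductions that
// proceed from the front of the sequence.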
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
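// The proof splits the elements into the prefix [0, i) and the suffix
// [i + 1, size_of rv): both are disjoint from the modified region subtree of
// the i-th element (by `rs_loc_elems_elem_disj`), so their invariants are
// preserved, while the i-th element's invariant is part of the hypothesis.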
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
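// Note that both core lemmas (`rv_inv_preserved` and `as_seq_preserved`)
// carry SMT patterns, so clients that modify a location disjoint from
// `loc_rvector rv` get invariant and representation preservation for free.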
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
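// `alloc_` initializes the slots [0, cidx) from the back: each step creates
// a fresh sub-region of the vector's frame, allocates one element in it with
// `rg_alloc`, and recurses on the remaining prefix.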
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
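// Unlike `insert`, `insert_copy` does not take ownership of `v`: it
// allocates a fresh sub-region of the vector's frame, copies `v` into it
// with `Cpy?.copy`, and inserts the copy, so `v` only needs to live in a
// region disjoint from the vector's frame.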
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
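// `assign` overwrites slot `i` in place. The precondition deliberately does
// not require the element invariant at slot `i` itself; the previous element
// is simply dropped, and nothing is freed here.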
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
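// `assign_copy` keeps the vector itself untouched and instead copies `v`
// into the element already stored at slot `i`, so only that element's
// regions are modified.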
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v)))) | [] | LowStar.RVector.assign_copy | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
cp: LowStar.RVector.copyable a rg ->
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t{i < LowStar.Vector.size_of rv} ->
v: a
-> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 59,
"end_line": 993,
"start_col": 39,
"start_line": 958
} |
FStar.HyperStack.ST.ST | val shrink:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> new_size:uint32_t{new_size <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = new_size /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) 0 (U32.v new_size)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shrink #a #rst #rg rv new_size =
let size = V.size_of rv in
[@@inline_let] let sz = U32.v size in
[@@inline_let] let nsz = U32.v new_size in
let hh0 = HST.get () in
if new_size >= size then rv else
begin
free_elems_from rv new_size;
rv_loc_elems_included hh0 rv new_size size;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz) hh0 hh1);
let frv = V.shrink rv new_size in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 sz
0 nsz nsz sz;
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 nsz;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz)
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) 0 nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz)
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 sz 0 nsz;
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz)
(as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv) 0 nsz 0 nsz;
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv) 0 nsz)
(as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz)
(as_seq hh2 frv));
frv
end | val shrink:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> new_size:uint32_t{new_size <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = new_size /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) 0 (U32.v new_size))))
let shrink #a #rst #rg rv new_size = | true | null | false | let size = V.size_of rv in
[@@ inline_let ]let sz = U32.v size in
[@@ inline_let ]let nsz = U32.v new_size in
let hh0 = HST.get () in
if new_size >= size
then rv
else
(free_elems_from rv new_size;
rv_loc_elems_included hh0 rv new_size size;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz) hh0 hh1);
let frv = V.shrink rv new_size in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
rs_loc_elems_disj rg (V.as_seq hh0 rv) (V.frameOf rv) 0 sz 0 nsz nsz sz;
rs_loc_elems_parent_disj rg (V.as_seq hh0 rv) (V.frameOf rv) 0 nsz;
rs_elems_inv_preserved rg
(V.as_seq hh0 rv)
0
nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz) (loc_region_only false (V.frameOf rv)))
hh0
hh2;
assert (rv_inv #a #rst #rg hh2 frv);
as_seq_seq_preserved rg
(V.as_seq hh0 rv)
0
nsz
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) nsz sz) (loc_region_only false (V.frameOf rv)))
hh0
hh2;
as_seq_seq_slice rg hh0 (V.as_seq hh0 rv) 0 sz 0 nsz;
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz) (as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
as_seq_seq_eq rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv) 0 nsz 0 nsz;
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv) 0 nsz)
(as_seq_seq rg hh2 (V.as_seq hh0 rv) 0 nsz));
assert (S.equal (S.slice (as_seq hh0 rv) 0 nsz) (as_seq hh2 frv));
frv) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"FStar.Integers.op_Greater_Equals",
"Prims.bool",
"Prims.unit",
"Prims._assert",
"FStar.Seq.Base.equal",
"LowStar.Regional.__proj__Rgl__item__repr",
"FStar.Seq.Base.slice",
"LowStar.RVector.as_seq",
"LowStar.RVector.as_seq_seq",
"LowStar.Vector.as_seq",
"LowStar.RVector.as_seq_seq_eq",
"LowStar.RVector.as_seq_seq_slice",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.Monotonic.Buffer.loc_union",
"LowStar.RVector.rs_loc_elems",
"LowStar.Monotonic.Buffer.loc_region_only",
"LowStar.Vector.frameOf",
"LowStar.RVector.rv_inv",
"LowStar.RVector.rs_elems_inv_preserved",
"LowStar.RVector.rs_loc_elems_parent_disj",
"LowStar.RVector.rs_loc_elems_disj",
"LowStar.Monotonic.Buffer.modifies",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Vector.vector",
"LowStar.Vector.shrink",
"LowStar.RVector.rv_loc_elems_included",
"LowStar.RVector.free_elems_from",
"FStar.UInt.uint_t",
"FStar.UInt32.v"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
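// A purely illustrative (hypothetical) instance: for a regional of
// fixed-length buffers, `copy` would essentially be
// `fun _ src dst -> B.blit src 0ul dst 0ul len`, with `B.blit`'s
// postcondition providing the representation equality required above.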
// rst: the type of the state carried by the `regional` instance (`Rgl?.state rg`)
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
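// `rvector rg` is definitionally just `V.vector a`: the regional instance
// `rg` only indexes the type and carries no run-time data, so the whole
// resource discipline lives in the predicates below.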
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
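// `rs_loc_elem` is the region footprint of a single element and
// `rs_loc_elems` unions the footprints of `rs[i..j)`; the inclusion and
// disjointness lemmas that follow let clients frame a modification of one
// element against the rest of the vector.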
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
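// `loc_all_exts_from false r` covers every region strictly below `r`
// (the mod-set of `r` minus `r` itself), i.e. an upper bound on the
// footprint of all elements allocated under the vector's frame.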
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k)
val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1))
let rec free_elems #a #rst #rg rv idx =
let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v idx + 1) 0 (U32.v idx) (U32.v idx);
rv_elems_inv_preserved
rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then
free_elems rv (idx - 1ul)
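// `flush rv i` frees the elements in slots [0, i) and then shifts the
// remaining ones down with `V.flush`, so the result has size
// `V.size_of rv - i` and represents the suffix of the old sequence.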
val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv)))))
#reset-options "--z3rlimit 40"
let flush #a #rst #rg rv i =
let hh0 = HST.get () in
(if i = 0ul then () else free_elems rv (i - 1ul));
rv_loc_elems_included hh0 rv 0ul i;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) hh0 hh1);
let frv = V.flush rv (rg_dummy rg) i in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv))
0 (U32.v i) (U32.v i) (U32.v (V.size_of rv));
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v i) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(U32.v i) (U32.v (V.size_of rv));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv)
(U32.v i) (U32.v (V.size_of rv)) 0 (U32.v (V.size_of frv));
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv)
0 (U32.v (V.size_of frv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq hh2 frv));
frv
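// `free_elems_from` is the mirror image of `free_elems`: it frees the
// elements in slots [idx, V.size_of rv), which is exactly what `shrink`
// needs before truncating the vector.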
val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv idx (V.size_of rv)) h0 h1))
let rec free_elems_from #a #rst #rg rv idx =
let hh0 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v idx) (U32.v (V.size_of rv))
(U32.v idx+1) (U32.v (V.size_of rv))
(U32.v idx);
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rv_elems_inv_preserved
rv (idx+1ul) (V.size_of rv)
(rv_loc_elem hh0 rv idx) hh0 hh1;
if idx + 1ul < V.size_of rv then
begin
free_elems_from rv (idx + 1ul);
rs_loc_elems_rec_inverse rg (V.as_seq hh0 rv) (U32.v idx) (U32.v (V.size_of rv))
end
val shrink:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> new_size:uint32_t{new_size <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = new_size /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) 0 (U32.v new_size)))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 40,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shrink:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> new_size:uint32_t{new_size <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = new_size /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) 0 (U32.v new_size)))) | [] | LowStar.RVector.shrink | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
new_size: LowStar.Vector.uint32_t{new_size <= LowStar.Vector.size_of rv}
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 3,
"end_line": 1171,
"start_col": 36,
"start_line": 1125
} |
FStar.Pervasives.Lemma | val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1 | val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 = | false | null | true | as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
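// Added note: `rs_loc_elems rg rs i j` is the union of the element locations
// `rs_loc_elem rg rs k` for k in [i, j), built by recursion on `j`.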
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
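// Added note: `as_seq_seq rg h rs i j` maps each element of `rs` with index in
// [i, j) to its high-level representation `Rgl?.r_repr rg h`, preserving order.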
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j))) | [] | LowStar.RVector.as_seq_sub_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t{i <= j && j <= LowStar.Vector.size_of rv} ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.Vector.live h0 rv /\ LowStar.RVector.rv_elems_inv h0 rv i j /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.rv_loc_elems h0 rv i j) /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.Vector.loc_vector rv) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures
([@@ FStar.Pervasives.inline_let ]let _ =
LowStar.RVector.rv_elems_inv_preserved rv i j p h0 h1
in
FStar.Seq.Base.equal (LowStar.RVector.as_seq_sub h0 rv i j)
(LowStar.RVector.as_seq_sub h1 rv i j))) | {
"end_col": 70,
"end_line": 648,
"start_col": 2,
"start_line": 648
} |
FStar.Pervasives.Lemma | val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1)) | val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
let rec as_seq_seq_slice #a #rst rg h rs i j k l = | false | null | true | if k = l
then ()
else
(as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg
h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0
(l - k - 1)
0
(l - k - 1)) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.seq",
"FStar.Integers.nat",
"Prims.l_and",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Seq.Base.length",
"LowStar.RVector.rs_elems_inv",
"Prims.op_AmpAmp",
"FStar.Integers.op_Subtraction",
"Prims.op_Equality",
"Prims.bool",
"LowStar.RVector.as_seq_seq_eq",
"FStar.Seq.Base.slice",
"FStar.Integers.op_Plus",
"Prims.unit",
"LowStar.RVector.as_seq_seq_index",
"LowStar.RVector.as_seq_seq_slice"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
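// Added note (assumption about `HS.mod_set` semantics): `loc_all_exts_from false r`
// is intended to cover the regions that transitively extend `r`, with `r` itself
// excluded; the lemmas below show it includes the element regions of a sequence
// whose elements all extend `r`.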
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
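// Added note: if `p` is disjoint from every element region and only `p` is
// modified, each element's invariant survives; the proof recurses over the
// sequence and appeals to `r_sep` for one element at a time.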
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
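// Added note: a pointwise argument — equal element slices yield equal
// representation sequences under `as_seq_seq`.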
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10" | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k))) | [
"recursion"
] | LowStar.RVector.as_seq_seq_slice | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
h: FStar.Monotonic.HyperStack.mem ->
rs: FStar.Seq.Base.seq a ->
i: FStar.Integers.nat ->
j:
FStar.Integers.nat
{i <= j /\ j <= FStar.Seq.Base.length rs /\ LowStar.RVector.rs_elems_inv rg h rs i j} ->
k: FStar.Integers.nat ->
l: FStar.Integers.nat{k <= l && l <= j - i}
-> FStar.Pervasives.Lemma
(ensures
FStar.Seq.Base.equal (FStar.Seq.Base.slice (LowStar.RVector.as_seq_seq rg h rs i j) k l)
(LowStar.RVector.as_seq_seq rg h (FStar.Seq.Base.slice rs (i + k) (i + l)) 0 (l - k))) | {
"end_col": 37,
"end_line": 597,
"start_col": 2,
"start_line": 591
} |
FStar.Pervasives.Lemma | val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1 | val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 = | false | null | true | as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.as_seq_sub_preserved",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
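// A minimal illustrative sketch (kept in a comment and not verified here) of a
// `copyable` instance for fixed-length buffers, assuming some regional instance
// `buf_rg : regional U32.t (B.buffer a)` whose state is the length `len`; both
// `buf_rg` and `len` are hypothetical names, not defined in this file:
//
// let buffer_copyable (#a:Type0) (len:U32.t) (buf_rg:regional U32.t (B.buffer a))
// : copyable (B.buffer a) buf_rg
// = Cpy (fun _ src dst -> B.blit src 0ul dst 0ul len)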
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
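// `loc_rvector rv` is the set of all regions at or below the vector's frame.
// Because the invariant below (`elems_reg`) forces every element's region to
// extend that frame, this single location covers the vector's own cells and
// all of its elements.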
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
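// `rs_elems_reg` says two things about rs[i, j): each element lives in a region
// that extends the parent region `prid`, and any two distinct elements live in
// disjoint regions. The pairwise disjointness is what later allows one element
// to be modified without disturbing the invariants or representations of the
// others.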
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
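// Unfolded, the invariant says: every element satisfies its regional invariant
// (`elems_inv`), the element regions extend the vector's frame and are pairwise
// disjoint (`elems_reg`), and the vector itself is live, freeable and rooted in
// an eternal region (`rv_itself_inv`).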
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
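// `rs_loc_elems rg rs i j` is the union of the per-element locations over the
// half-open range [i, j), built by recursion on j; in particular
// `rs_loc_elems rg rs i i` is `loc_none`.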
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i + 1)
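// This is the location of the single element at index i, i.e. `rs_loc_elems`
// over the one-element range [U32.v i, U32.v i + 1).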
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
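// `rs_loc_elems` peels elements off the right end (index j - 1); this lemma
// re-associates the unions so the leftmost element (index i) can be peeled off
// instead, which is the shape needed when reasoning by induction from the front.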
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
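// All regions strictly below `r`: the downward closure of {r} minus r itself.
// Compare `loc_all_regions_from false r`, which additionally includes r.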
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
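// The element locations are computed from the vector's contents, so as long as
// the vector's own cells are untouched between h0 and h1, the locations do not
// change, even if the elements' contents do.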
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
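// A sketch of the intended use (hypothetical client code, not verified here):
// after modifying a location `p` disjoint from `loc_rvector rv`, the SMT
// patterns above recover `rv_inv` in the final state automatically, e.g.
//
// let client (#a:Type0) (#rst:Type) (#rg:regional rst a)
// (rv:rvector rg) (b:B.buffer bool{loc_disjoint (B.loc_buffer b) (loc_rvector rv)})
// : HST.ST unit
// (requires (fun h0 -> rv_inv h0 rv /\ B.live h0 b /\ B.length b > 0))
// (ensures (fun _ _ h1 -> rv_inv h1 rv))
// = B.upd b 0ul true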
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
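// When only the region of element i is modified (and the element's own
// invariant is re-established), the remaining elements are untouched because
// their regions are pairwise disjoint from it; the proof splits the range at i
// to make this explicit.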
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
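// The ghost sequence of representations of rs[i, j), built by `snoc` on the
// right; `as_seq_seq_index` below shows that element k of the result is
// `Rgl?.r_repr rg h (S.index rs (i + k))`.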
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
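// The full representation of an `rvector`: the representations of all of its
// elements, in order.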
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
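// Preservation of the ghost representation: if the modified location `p` is
// disjoint from every element's region in rs[i, j), each per-element
// representation is unchanged, hence so is the whole sequence.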
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv))) | [] | LowStar.RVector.as_seq_preserved_ | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rv_inv h0 rv /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.Vector.loc_vector rv) /\
LowStar.Monotonic.Buffer.loc_disjoint p
(LowStar.RVector.rv_loc_elems h0 rv 0ul (LowStar.Vector.size_of rv)) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures
([@@ FStar.Pervasives.inline_let ]let _ = LowStar.RVector.rv_inv_preserved_ rv p h0 h1 in
FStar.Seq.Base.equal (LowStar.RVector.as_seq h0 rv) (LowStar.RVector.as_seq h1 rv))) | {
"end_col": 52,
"end_line": 662,
"start_col": 2,
"start_line": 662
} |
FStar.Pervasives.Lemma | val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1 | val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
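// The counterpart of `rv_inv_preserved` for the representation: a modification
// disjoint from `loc_rvector rv` leaves `as_seq` unchanged. The SMT patterns
// mirror the first core lemma so both facts are available to clients
// automatically.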
let as_seq_preserved #a #rst #rg rv p h0 h1 = | false | null | true | assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.RVector.as_seq_preserved_",
"Prims.unit",
"Prims._assert",
"LowStar.Monotonic.Buffer.loc_includes",
"LowStar.RVector.loc_rvector",
"LowStar.RVector.rv_loc_elems",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"LowStar.RVector.rv_loc_elems_included",
"LowStar.Vector.loc_vector"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i + 1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)] | [] | LowStar.RVector.as_seq_preserved | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rv: LowStar.RVector.rvector rg ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma
(requires
LowStar.RVector.rv_inv h0 rv /\
LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.loc_rvector rv) /\
LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures
([@@ FStar.Pervasives.inline_let ]let _ = LowStar.RVector.rv_inv_preserved rv p h0 h1 in
FStar.Seq.Base.equal (LowStar.RVector.as_seq h0 rv) (LowStar.RVector.as_seq h1 rv)))
[
SMTPat (LowStar.RVector.rv_inv h0 rv);
SMTPat (LowStar.Monotonic.Buffer.loc_disjoint p (LowStar.RVector.loc_rvector rv));
SMTPat (LowStar.Monotonic.Buffer.modifies p h0 h1)
] | {
"end_col": 30,
"end_line": 682,
"start_col": 2,
"start_line": 679
} |
FStar.Pervasives.Lemma | val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1 | val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
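// `r_sep_forall` restates the record field `Rgl?.r_sep` as a standalone lemma;
// presumably this shape is convenient for partial application or for use with
// the quantifier combinators of `FStar.Classical`, which a record projection
// cannot be fed to directly.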
let r_sep_forall #a #rst rg p h0 h1 v = | false | null | true | Rgl?.r_sep rg v p h0 h1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
"lemma"
] | [
"LowStar.Regional.regional",
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.Regional.rg_inv",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"Prims.unit"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we will define the invariant for `rvector #a` that contains
// the invariant for each element and some more about the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
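// In words: every element satisfies its regional invariant (`elems_inv`),
// each element's region extends the vector's frame and the regions are
// pairwise disjoint (`elems_reg`), and the vector itself is live, freeable,
// and sits in an eternal region (`rv_itself_inv`).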
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
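// Framing in practice: thanks to the SMT patterns above, a caller that
// modifies a location disjoint from `loc_rvector rv` usually obtains
// `rv_inv h1 rv` automatically; calling `rv_inv_preserved rv p h0 h1`
// explicitly is only needed when the patterns do not fire.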
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
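// `as_seq` is the ghost, element-wise representation of the vector: entry
// `k` of the result is the `Rgl?.r_repr` of the `k`-th element, as made
// precise by `as_seq_seq_index` below.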
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
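// Together with `rv_inv_preserved`, this is the client-facing framing
// interface: modifications disjoint from `loc_rvector rv` preserve both
// the invariant and the ghost representation.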
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
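// Illustrative usage (hypothetical client code): `let rv = alloc rg 16ul in ...`
// creates a fresh region under the root and a 16-element vector whose
// representation is `S.create 16 (Ghost.reveal (Rgl?.irepr rg))`.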
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
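// Note: `insert` takes ownership of `v` (its region must extend the
// vector's frame and be disjoint from every existing element), whereas
// `insert_copy` below allocates a fresh element under the vector's frame
// and copies `v` into it, so `v` must live in a region disjoint from the
// vector's frame.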
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
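// Note: `assign` overwrites slot `i` in place; the precondition only asks
// for element invariants on the slots other than `i`, since the previous
// occupant of slot `i` is simply dropped (not freed) by this operation.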
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v)) | [] | LowStar.RVector.r_sep_forall | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
rg: LowStar.Regional.regional rst a ->
p: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem ->
v: a{LowStar.Regional.rg_inv rg h0 v}
-> FStar.Pervasives.Lemma
(requires
LowStar.Monotonic.Buffer.loc_disjoint (LowStar.Monotonic.Buffer.loc_all_regions_from false
(Rgl?.region_of rg v))
p /\ LowStar.Monotonic.Buffer.modifies p h0 h1)
(ensures LowStar.Regional.rg_inv rg h1 v /\ Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v) | {
"end_col": 25,
"end_line": 941,
"start_col": 2,
"start_line": 941
} |
FStar.HyperStack.ST.ST | val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv | val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v = | true | null | false | let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.copyable",
"LowStar.RVector.rvector",
"Prims.b2t",
"Prims.op_Negation",
"LowStar.Vector.is_full",
"LowStar.RVector.insert",
"Prims.unit",
"LowStar.RVector.as_seq_preserved_",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.RVector.rv_inv_preserved_",
"LowStar.RVector.rv_loc_elems_each_disj",
"FStar.UInt32.__uint_to_t",
"LowStar.Vector.size_of",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.RVector.__proj__Cpy__item__copy",
"LowStar.Regional.__proj__Rgl__item__state",
"LowStar.RVector.as_seq_preserved",
"LowStar.Monotonic.Buffer.loc_none",
"LowStar.RVector.rv_inv_preserved",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"LowStar.Regional.rg_alloc",
"FStar.Monotonic.HyperHeap.rid",
"FStar.HyperStack.ST.new_region",
"LowStar.Vector.frameOf",
"LowStar.RVector.rv_elems_inv_live_region"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the regional
// invariant of each element with region conditions relating the elements to
// the vector's frame, plus liveness of the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v)))) | [] | LowStar.RVector.insert_copy | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
cp: LowStar.RVector.copyable a rg ->
rv: LowStar.RVector.rvector rg {Prims.op_Negation (LowStar.Vector.is_full rv)} ->
v: a
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 14,
"end_line": 873,
"start_col": 37,
"start_line": 857
} |
FStar.HyperStack.ST.ST | val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv idx (V.size_of rv)) h0 h1)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec free_elems_from #a #rst #rg rv idx =
let hh0 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v idx) (U32.v (V.size_of rv))
(U32.v idx+1) (U32.v (V.size_of rv))
(U32.v idx);
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rv_elems_inv_preserved
rv (idx+1ul) (V.size_of rv)
(rv_loc_elem hh0 rv idx) hh0 hh1;
if idx + 1ul < V.size_of rv then
begin
free_elems_from rv (idx + 1ul);
rs_loc_elems_rec_inverse rg (V.as_seq hh0 rv) (U32.v idx) (U32.v (V.size_of rv))
end | val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv idx (V.size_of rv)) h0 h1))
let rec free_elems_from #a #rst #rg rv idx = | true | null | false | let hh0 = HST.get () in
rs_loc_elems_elem_disj rg
(V.as_seq hh0 rv)
(V.frameOf rv)
(U32.v idx)
(U32.v (V.size_of rv))
(U32.v idx + 1)
(U32.v (V.size_of rv))
(U32.v idx);
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rv_elems_inv_preserved rv (idx + 1ul) (V.size_of rv) (rv_loc_elem hh0 rv idx) hh0 hh1;
if idx + 1ul < V.size_of rv
then
(free_elems_from rv (idx + 1ul);
rs_loc_elems_rec_inverse rg (V.as_seq hh0 rv) (U32.v idx) (U32.v (V.size_of rv))) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"FStar.Integers.op_Plus",
"FStar.UInt32.__uint_to_t",
"LowStar.RVector.rs_loc_elems_rec_inverse",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"Prims.unit",
"LowStar.RVector.free_elems_from",
"Prims.bool",
"LowStar.RVector.rv_elems_inv_preserved",
"LowStar.RVector.rv_loc_elem",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Regional.rg_free",
"LowStar.Vector.index",
"LowStar.RVector.rs_loc_elems_elem_disj",
"LowStar.Vector.frameOf",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
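// Illustrative sketch: a buffer-backed instance of `copyable` would take
// `B.blit` as its copy operator, as the comment above suggests.  Roughly,
// assuming a hypothetical regional instance `buf_rg` over `B.buffer a` whose
// invariant fixes the buffer length to some `len`:
//
//   let buffer_copyable : copyable (B.buffer a) buf_rg =
//     Cpy (fun _ src dst -> B.blit src 0ul dst 0ul len)
//
// The refinement and liveness proof obligations are elided here; only the
// shape of the instance is shown.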
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
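// In other words: `elems_inv` says every element satisfies the per-element
// regional invariant `rg_inv`; `elems_reg` says each element's region extends
// the vector's frame and that element regions are pairwise disjoint; and
// `rv_itself_inv` says the vector itself is live, freeable, and allocated in
// an eternal region.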
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
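// The SMT patterns above let the solver apply this lemma automatically in
// client proofs: whenever `modifies p h0 h1` is known for a `p` disjoint from
// `loc_rvector rv`, the invariant `rv_inv h1 rv` typically follows without an
// explicit call.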
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k)
val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1))
let rec free_elems #a #rst #rg rv idx =
let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v idx + 1) 0 (U32.v idx) (U32.v idx);
rv_elems_inv_preserved
rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then
free_elems rv (idx - 1ul)
val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv)))))
#reset-options "--z3rlimit 40"
let flush #a #rst #rg rv i =
let hh0 = HST.get () in
(if i = 0ul then () else free_elems rv (i - 1ul));
rv_loc_elems_included hh0 rv 0ul i;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) hh0 hh1);
let frv = V.flush rv (rg_dummy rg) i in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv))
0 (U32.v i) (U32.v i) (U32.v (V.size_of rv));
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v i) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(U32.v i) (U32.v (V.size_of rv));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv)
(U32.v i) (U32.v (V.size_of rv)) 0 (U32.v (V.size_of frv));
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv)
0 (U32.v (V.size_of frv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq hh2 frv));
frv
val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 -> | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 40,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val free_elems_from:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv idx (V.size_of rv) /\
rv_elems_reg h0 rv idx (V.size_of rv)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv idx (V.size_of rv)) h0 h1)) | [
"recursion"
] | LowStar.RVector.free_elems_from | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> idx: LowStar.Vector.uint32_t{idx < LowStar.Vector.size_of rv}
-> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 5,
"end_line": 1110,
"start_col": 44,
"start_line": 1091
} |
FStar.HyperStack.ST.ST | val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v)))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv | val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert #a #rst #rg rv v = | true | null | false | let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
rs_loc_elems_parent_disj rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved rg
(V.as_seq hh0 rv)
0
(U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0
hh1;
Rgl?.r_sep rg v (loc_region_only false (V.frameOf rv)) hh0 hh1;
assert (S.equal (V.as_seq hh0 rv) (S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved rg
(V.as_seq hh0 rv)
0
(U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0
hh1;
as_seq_seq_slice rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv)) 0 (U32.v (V.size_of rv));
irv | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"Prims.b2t",
"Prims.op_Negation",
"LowStar.Vector.is_full",
"Prims.unit",
"LowStar.RVector.as_seq_seq_slice",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"LowStar.Vector.size_of",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.Monotonic.Buffer.loc_region_only",
"LowStar.Vector.frameOf",
"Prims._assert",
"FStar.Seq.Base.equal",
"FStar.Seq.Base.slice",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"LowStar.RVector.rs_elems_inv_preserved",
"LowStar.RVector.rs_loc_elems_parent_disj",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Vector.vector",
"LowStar.Vector.insert"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
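// (Illustrative aside: a buffer-backed `copyable` instance would wrap `B.blit`
// in `Cpy`, per the comment above; the proof obligations are omitted in that
// sketch.)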
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// of each element with conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
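// `rs_loc_elems rg rs i j` is thus the union of the regional footprints of
// the elements rs[i..j-1] (and `loc_none` for an empty range).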
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
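// All regions strictly below `r` (its descendants, excluding `r` itself);
// this covers the element footprints without the vector's own frame.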
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
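// With the SMT patterns above, this lemma fires automatically: any
// modification disjoint from the vector's whole footprint (`loc_rvector`,
// i.e. the frame region and everything allocated below it) preserves
// `rv_inv`.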
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
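// `as_seq h rv` is the ghost representation of the whole vector: the
// sequence of `Rgl?.r_repr` values of its elements, in order.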
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
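// Symmetric to `rv_inv_preserved`: modifications disjoint from
// `loc_rvector rv` also leave the ghost representation `as_seq` unchanged.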
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
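// A minimal usage sketch (illustrative only; `free` is assumed to be the
// deallocation function defined further down in this module):
//
//   let rv = alloc rg 4ul in   // four elements, each in a fresh sub-region
//   ...                        // read/assign elements while maintaining rv_inv
//   free rv                    // release the elements and the vector itself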
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v)))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v)))) | [] | LowStar.RVector.insert | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg {Prims.op_Negation (LowStar.Vector.is_full rv)} -> v: a
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 5,
"end_line": 841,
"start_col": 29,
"start_line": 815
} |
FStar.HyperStack.ST.ST | val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx)) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx) | val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
let rec alloc_ #a #rst #rg rv cidx = | true | null | false | let hh0 = HST.get () in
if cidx = 0ul
then ()
else
(let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep rg (V.get hh2 rv (cidx - 1ul)) (V.loc_vector_within rv (cidx - 1ul) cidx) hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep rg (V.get hh3 rv (cidx - 1ul)) (V.loc_vector_within rv 0ul (cidx - 1ul)) hh2 hh3;
V.forall2_extend hh3
rv
0ul
(cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1) (Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx) | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [
""
] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"Prims.op_Equality",
"FStar.UInt32.t",
"FStar.UInt32.__uint_to_t",
"Prims.unit",
"Prims.bool",
"LowStar.Vector.loc_vector_within_union_rev",
"LowStar.Vector.forall2_extend",
"FStar.Integers.op_Subtraction",
"FStar.Monotonic.HyperHeap.disjoint",
"LowStar.Regional.__proj__Rgl__item__region_of",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"LowStar.Vector.get",
"LowStar.Vector.loc_vector_within",
"LowStar.Vector.loc_vector_within_included",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.RVector.alloc_",
"LowStar.Vector.assign",
"LowStar.Regional.rg_alloc",
"FStar.Monotonic.HyperHeap.rid",
"FStar.HyperStack.ST.new_region",
"LowStar.Vector.frameOf"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
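// As noted above, an instance for `B.buffer a` would use `B.blit` (suitably
// wrapped) as its `copy` field: the `Cpy` constructor only requires that
// `copy` write within `dst`'s region and make `dst`'s representation equal
// to `src`'s.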
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the
// invariant of each element with conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i+1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx)) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx)) | [
"recursion"
] | LowStar.RVector.alloc_ | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> cidx: LowStar.Vector.uint32_t{cidx <= LowStar.Vector.size_of rv}
-> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 49,
"end_line": 740,
"start_col": 36,
"start_line": 714
} |
FStar.HyperStack.ST.ST | val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv)))))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1 | val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v = | true | null | false | let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
rs_loc_elems_parent_disj rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved rg (V.as_seq hh0 rv) 0 (U32.v i) (V.loc_vector rv) hh0 hh1;
rs_elems_inv_preserved rg
(V.as_seq hh0 rv)
(U32.v i + 1)
(U32.v (V.size_of rv))
(V.loc_vector rv)
hh0
hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
rs_loc_elems_parent_disj rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved rg (V.as_seq hh1 rv) 0 (U32.v (V.size_of rv)) (V.loc_vector rv) hh0 hh1 | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.Vector.as_seq",
"FStar.UInt32.v",
"LowStar.Vector.loc_vector",
"Prims.unit",
"LowStar.RVector.rs_loc_elems_parent_disj",
"LowStar.Vector.frameOf",
"LowStar.Regional.__proj__Rgl__item__r_sep",
"LowStar.RVector.rs_elems_inv_preserved",
"FStar.Integers.op_Plus",
"FStar.Integers.Signed",
"FStar.Integers.Winfinite",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Vector.assign"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
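// Example (illustrative sketch, not part of the original file): for a regional
// instance over `B.buffer a` with a fixed length `len`, the `copy` field can be
// realized by `B.blit src 0ul dst 0ul len`, which yields exactly the required
// postcondition `Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src`.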
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// for each element with conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
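// For example, unfolding the recursion for two elements gives
// `rs_loc_elems rg rs i (i + 2) ==
//  loc_union (loc_union loc_none (rs_loc_elem rg rs i)) (rs_loc_elem rg rs (i + 1))`,
// i.e. the union of the element regions, accumulated from the left.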
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
  rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i + 1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
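// In other words, `loc_all_exts_from false r` covers every region strictly below `r`
// in the region tree: `HS.mod_set (Set.singleton r)` is `r` together with all regions
// extending it, and intersecting with the complement of `{r}` removes `r` itself.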
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
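// Note: the SMT patterns above let this framing lemma fire automatically: any
// modification disjoint from `loc_rvector rv` (the vector's region and everything
// below it) preserves `rv_inv`.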
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
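// Pointwise, this means `S.index (as_seq_seq rg h rs i j) k ==
// Rgl?.r_repr rg h (S.index rs (i + k))`, which is exactly what the
// `as_seq_seq_index` lemma below establishes.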
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
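// For example, `alloc rg 8ul` creates a fresh region under the root and returns an
// 8-element rvector whose representation is
// `S.create 8 (Ghost.reveal (Rgl?.irepr rg))`, i.e. eight dummy-initialized slots.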
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
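// Note: the "Safety" part reestablishes the element invariants (the new cell only
// modifies the vector's own region, which is disjoint from every element region by
// `rs_loc_elems_parent_disj`), and the "Correctness" part relates `as_seq hh1 irv`
// to `as_seq hh0 rv` via `as_seq_seq_preserved` and `as_seq_seq_slice`.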
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv)))))) | [] | LowStar.RVector.assign | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> i: LowStar.Vector.uint32_t{i < LowStar.Vector.size_of rv} -> v: a
-> FStar.HyperStack.ST.ST Prims.unit | {
"end_col": 29,
"end_line": 929,
"start_col": 31,
"start_line": 903
} |
FStar.HyperStack.ST.ST | val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv))))) | [
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Modifies",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Classical",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let flush #a #rst #rg rv i =
let hh0 = HST.get () in
(if i = 0ul then () else free_elems rv (i - 1ul));
rv_loc_elems_included hh0 rv 0ul i;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) hh0 hh1);
let frv = V.flush rv (rg_dummy rg) i in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
// Safety
rs_loc_elems_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv))
0 (U32.v i) (U32.v i) (U32.v (V.size_of rv));
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
(U32.v i) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
assert (rv_inv #a #rst #rg hh2 frv);
// Correctness
as_seq_seq_preserved
rg (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i))
(loc_region_only false (V.frameOf rv)))
hh0 hh2;
as_seq_seq_slice
rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(U32.v i) (U32.v (V.size_of rv));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
as_seq_seq_eq
rg hh2 (V.as_seq hh0 rv) (V.as_seq hh2 frv)
(U32.v i) (U32.v (V.size_of rv)) 0 (U32.v (V.size_of frv));
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv)
0 (U32.v (V.size_of frv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv)
(U32.v i) (U32.v (V.size_of rv))));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq hh2 frv));
frv | val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv)))))
let flush #a #rst #rg rv i = | true | null | false | let hh0 = HST.get () in
(if i = 0ul then () else free_elems rv (i - 1ul));
rv_loc_elems_included hh0 rv 0ul i;
let hh1 = HST.get () in
assert (modifies (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) hh0 hh1);
let frv = V.flush rv (rg_dummy rg) i in
let hh2 = HST.get () in
assert (modifies (loc_region_only false (V.frameOf rv)) hh1 hh2);
rs_loc_elems_disj rg
(V.as_seq hh0 rv)
(V.frameOf rv)
0
(U32.v (V.size_of rv))
0
(U32.v i)
(U32.v i)
(U32.v (V.size_of rv));
rs_loc_elems_parent_disj rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i) (U32.v (V.size_of rv));
rs_elems_inv_preserved rg
(V.as_seq hh0 rv)
(U32.v i)
(U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) (loc_region_only false (V.frameOf rv)))
hh0
hh2;
assert (rv_inv #a #rst #rg hh2 frv);
as_seq_seq_preserved rg
(V.as_seq hh0 rv)
(U32.v i)
(U32.v (V.size_of rv))
(loc_union (rs_loc_elems rg (V.as_seq hh0 rv) 0 (U32.v i)) (loc_region_only false (V.frameOf rv)))
hh0
hh2;
as_seq_seq_slice rg hh0 (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv)) (U32.v i) (U32.v (V.size_of rv));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))));
as_seq_seq_eq rg
hh2
(V.as_seq hh0 rv)
(V.as_seq hh2 frv)
(U32.v i)
(U32.v (V.size_of rv))
0
(U32.v (V.size_of frv));
assert (S.equal (as_seq_seq rg hh2 (V.as_seq hh2 frv) 0 (U32.v (V.size_of frv)))
(as_seq_seq rg hh2 (V.as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))));
assert (S.equal (S.slice (as_seq hh0 rv) (U32.v i) (U32.v (V.size_of rv))) (as_seq hh2 frv));
frv | {
"checked_file": "LowStar.RVector.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.Modifies.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.Integers.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": false,
"source_file": "LowStar.RVector.fst"
} | [] | [
"LowStar.Regional.regional",
"LowStar.RVector.rvector",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"Prims.unit",
"Prims._assert",
"FStar.Seq.Base.equal",
"LowStar.Regional.__proj__Rgl__item__repr",
"FStar.Seq.Base.slice",
"LowStar.RVector.as_seq",
"FStar.UInt32.v",
"LowStar.RVector.as_seq_seq",
"LowStar.Vector.as_seq",
"LowStar.RVector.as_seq_seq_eq",
"LowStar.RVector.as_seq_seq_slice",
"LowStar.RVector.as_seq_seq_preserved",
"LowStar.Monotonic.Buffer.loc_union",
"LowStar.RVector.rs_loc_elems",
"LowStar.Monotonic.Buffer.loc_region_only",
"LowStar.Vector.frameOf",
"LowStar.RVector.rv_inv",
"LowStar.RVector.rs_elems_inv_preserved",
"LowStar.RVector.rs_loc_elems_parent_disj",
"LowStar.RVector.rs_loc_elems_disj",
"LowStar.Monotonic.Buffer.modifies",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"LowStar.Vector.vector",
"LowStar.Vector.flush",
"LowStar.Regional.rg_dummy",
"LowStar.RVector.rv_loc_elems_included",
"FStar.UInt32.__uint_to_t",
"Prims.op_Equality",
"FStar.UInt32.t",
"Prims.bool",
"LowStar.RVector.free_elems",
"FStar.Integers.op_Subtraction"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module LowStar.RVector
open FStar.Classical
open FStar.Integers
open LowStar.Modifies
open LowStar.Regional
open LowStar.Vector
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module S = FStar.Seq
module B = LowStar.Buffer
module V = LowStar.Vector
module U32 = FStar.UInt32
/// Utilities
/// A `regional` type `a` is also `copyable` when there exists a copy operator
/// that guarantees the same representation between `src` and `dst`.
/// For instance, the `copy` operation for `B.buffer a` is `B.blit`.
///
/// Here, no reference at run-time is kept to the state argument of the
/// regional; conceivably, the caller will already have some reference handy to
/// the instance of the regional class and can retrieve the parameter from
/// there.
inline_for_extraction
noeq type copyable (#rst:Type) (a:Type0) (rg:regional rst a) =
| Cpy:
copy: (s:rst{s==Rgl?.state rg} -> src:a -> dst:a ->
HST.ST unit
(requires (fun h0 ->
rg_inv rg h0 src /\ rg_inv rg h0 dst /\
HS.disjoint (Rgl?.region_of rg src)
(Rgl?.region_of rg dst)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg dst)) h0 h1 /\
rg_inv rg h1 dst /\
Rgl?.r_repr rg h1 dst == Rgl?.r_repr rg h0 src))) ->
copyable a rg
// rst: regional state
type rvector (#a:Type0) (#rst:Type) (rg:regional rst a) = V.vector a
val loc_rvector:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg -> GTot loc
let loc_rvector #a #rst #rg rv =
loc_all_regions_from false (V.frameOf rv)
/// The invariant of `rvector`
// Here we define the invariant for `rvector #a`: it combines the invariant
// for each element with conditions on the vector itself.
val rs_elems_inv:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_inv #a #rst rg h rs i j =
V.forall_seq rs i j (rg_inv rg h)
val rv_elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_inv #a #rst #rg h rv i j =
rs_elems_inv rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val elems_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_inv #a #rst #rg h rv =
rv_elems_inv h rv 0ul (V.size_of rv)
val rs_elems_reg:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot Type0
let rs_elems_reg #a #rst rg rs prid i j =
V.forall_seq rs i j
(fun v -> HS.extends (Rgl?.region_of rg v) prid) /\
V.forall2_seq rs i j
(fun v1 v2 -> HS.disjoint (Rgl?.region_of rg v1)
(Rgl?.region_of rg v2))
val rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot Type0
let rv_elems_reg #a #rst #rg h rv i j =
rs_elems_reg rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
GTot Type0
let elems_reg #a #rst #rg h rv =
rv_elems_reg h rv 0ul (V.size_of rv)
val rv_itself_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_itself_inv #a #rst #rg h rv =
V.live h rv /\ V.freeable rv /\
HST.is_eternal_region (V.frameOf rv)
// This is the invariant of `rvector`.
val rv_inv:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg -> GTot Type0
let rv_inv #a #rst #rg h rv =
elems_inv h rv /\
elems_reg h rv /\
rv_itself_inv h rv
val rs_elems_inv_live_region:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_inv rg h rs i j))
(ensures (V.forall_seq rs i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rec rs_elems_inv_live_region #a #rst rg h rs i j =
if i = j then ()
else (Rgl?.r_inv_reg rg h (S.index rs (j - 1));
rs_elems_inv_live_region rg h rs i (j - 1))
val rv_elems_inv_live_region:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_inv h rv i j))
(ensures (V.forall_ h rv i j
(fun r -> HS.live_region h (Rgl?.region_of rg r))))
let rv_elems_inv_live_region #a #rst #rg h rv i j =
rs_elems_inv_live_region rg h (V.as_seq h rv) (U32.v i) (U32.v j)
/// Utilities for fine-grained region control
val rs_loc_elem:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat{i < S.length rs} ->
GTot loc
let rs_loc_elem #a #rst rg rs i =
loc_all_regions_from false (Rgl?.region_of rg (S.index rs i))
val rs_loc_elems:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
GTot loc (decreases j)
let rec rs_loc_elems #a #rst rg rs i j =
if i = j then loc_none
else loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1))
val rv_loc_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
GTot loc
let rv_loc_elems #a #rst #rg h rv i j =
rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v j)
val rv_loc_elem:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
GTot loc
let rv_loc_elem #a #rst #rg h rv i =
  rs_loc_elems rg (V.as_seq h rv) (U32.v i) (U32.v i + 1)
// Properties about inclusion of locations
val rs_loc_elems_rec_inverse:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i < j && j <= S.length rs} ->
Lemma (requires true)
(ensures (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) j)))
(decreases j)
let rec rs_loc_elems_rec_inverse #a #rst rg rs i j =
if i + 1 = j then ()
else (assert (rs_loc_elems rg rs i j ==
loc_union (rs_loc_elems rg rs i (j - 1))
(rs_loc_elem rg rs (j - 1)));
assert (rs_loc_elems rg rs (i + 1) j ==
loc_union (rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)));
rs_loc_elems_rec_inverse rg rs i (j - 1);
assert (rs_loc_elems rg rs i j ==
loc_union (loc_union
(rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1)))
(rs_loc_elem rg rs (j - 1)));
loc_union_assoc (rs_loc_elem rg rs i)
(rs_loc_elems rg rs (i + 1) (j - 1))
(rs_loc_elem rg rs (j - 1)))
val rs_loc_elems_includes:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
Lemma (loc_includes (rs_loc_elems rg rs i j)
(rs_loc_elem rg rs k))
let rec rs_loc_elems_includes #a #rst rg rs i j k =
if k = j - 1 then ()
else rs_loc_elems_includes #a #rst rg rs i (j - 1) k
val loc_all_exts_from:
preserve_liveness: bool -> r: HS.rid -> GTot loc
let loc_all_exts_from preserve_liveness r =
B.loc_regions
preserve_liveness
(Set.intersect
(HS.mod_set (Set.singleton r))
(Set.complement (Set.singleton r)))
val rs_loc_elem_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat{i < S.length rs} ->
Lemma (requires (HS.extends (Rgl?.region_of rg (S.index rs i)) prid))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elem rg rs i)))
let rs_loc_elem_included #a #rst rg rs prid i = ()
val rs_loc_elems_included:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_includes (loc_all_exts_from false prid)
(rs_loc_elems rg rs i j)))
(decreases j)
let rec rs_loc_elems_included #a #rst rg rs prid i j =
if i = j then ()
else (rs_loc_elem_included rg rs prid (j - 1);
rs_loc_elems_included rg rs prid i (j - 1))
val rv_loc_elems_included:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_includes (loc_all_exts_from false (V.frameOf rv))
(rv_loc_elems h rv i j)))
let rv_loc_elems_included #a #rst #rg h rv i j =
rs_loc_elems_included rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
// Properties about disjointness of locations
val rs_loc_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k:nat{i <= k && k < j} ->
l:nat{i <= l && l < j && k <> l} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj #a #rst rg rs prid i j k l = ()
val rs_loc_elem_disj_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (
forall (k:nat{i <= k && k < j}).
forall (l:nat{i <= l && l < j && k <> l}).
loc_disjoint (rs_loc_elem rg rs k)
(rs_loc_elem rg rs l)))
let rs_loc_elem_disj_forall #a #rst rg rs prid i j = ()
val rs_loc_elems_elem_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l:nat{i <= l && l < j && (l < k1 || k2 <= l)} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elem rg rs l)))
(decreases k2)
let rec rs_loc_elems_elem_disj #a #rst rg rs prid i j k1 k2 l =
if k1 = k2 then ()
else (rs_loc_elem_disj rg rs prid i j (k2 - 1) l;
rs_loc_elems_elem_disj rg rs prid i j k1 (k2 - 1) l)
val rs_loc_elems_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
k1:nat{i <= k1} ->
k2:nat{k1 <= k2 && k2 <= j} ->
l1:nat{i <= l1} ->
l2:nat{l1 <= l2 && l2 <= j} ->
Lemma (requires (rs_elems_reg rg rs prid i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rs_loc_elems rg rs k1 k2)
(rs_loc_elems rg rs l1 l2)))
(decreases k2)
let rec rs_loc_elems_disj #a #rst rg rs prid i j k1 k2 l1 l2 =
if k1 = k2 then ()
else (rs_loc_elems_elem_disj rg rs prid i j l1 l2 (k2 - 1);
rs_loc_elems_disj rg rs prid i j k1 (k2 - 1) l1 l2)
val rv_loc_elems_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
k1:uint32_t{i <= k1} ->
k2:uint32_t{k1 <= k2 && k2 <= j} ->
l1:uint32_t{i <= l1} ->
l2:uint32_t{l1 <= l2 && l2 <= j} ->
Lemma (requires (rv_elems_reg h rv i j /\ (k2 <= l1 || l2 <= k1)))
(ensures (loc_disjoint (rv_loc_elems h rv k1 k2)
(rv_loc_elems h rv l1 l2)))
let rv_loc_elems_disj #a #rst #rg h rv i j k1 k2 l1 l2 =
rs_loc_elems_disj rg (V.as_seq h rv) (V.frameOf rv)
(U32.v i) (U32.v j) (U32.v k1) (U32.v k2) (U32.v l1) (U32.v l2)
val rs_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> prid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (rs_elems_reg rg rs prid i j))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_region_only false prid)))
(decreases j)
let rec rs_loc_elems_parent_disj #a #rst rg rs prid i j =
if i = j then ()
else rs_loc_elems_parent_disj rg rs prid i (j - 1)
val rv_loc_elems_parent_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (rv_elems_reg h rv i j))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_region_only false (V.frameOf rv))))
let rv_loc_elems_parent_disj #a #rst #rg h rv i j =
rs_loc_elems_parent_disj rg (V.as_seq h rv) (V.frameOf rv) (U32.v i) (U32.v j)
val rs_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> drid:HS.rid ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
Lemma (requires (V.forall_seq rs i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rs_loc_elems rg rs i j)
(loc_all_regions_from false drid)))
(decreases j)
let rec rs_loc_elems_each_disj #a #rst rg rs drid i j =
if i = j then ()
else rs_loc_elems_each_disj rg rs drid i (j - 1)
val rv_loc_elems_each_disj:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
drid:HS.rid ->
Lemma (requires (V.forall_ h rv i j
(fun r -> HS.disjoint (Rgl?.region_of rg r) drid)))
(ensures (loc_disjoint (rv_loc_elems h rv i j)
(loc_all_regions_from false drid)))
let rv_loc_elems_each_disj #a #rst #rg h rv i j drid =
rs_loc_elems_each_disj rg (V.as_seq h rv) drid (U32.v i) (U32.v j)
// Preservation based on disjointness
val rv_loc_elems_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
loc_disjoint p (V.loc_vector_within rv i j) /\
modifies p h0 h1))
(ensures (rv_loc_elems h0 rv i j ==
rv_loc_elems h1 rv i j))
(decreases (U32.v j))
let rec rv_loc_elems_preserved #a #rst #rg rv i j p h0 h1 =
if i = j then ()
else (V.loc_vector_within_includes rv i j (j - 1ul) j;
V.get_preserved rv (j - 1ul) p h0 h1;
assert (V.get h0 rv (j - 1ul) == V.get h1 rv (j - 1ul));
V.loc_vector_within_includes rv i j i (j - 1ul);
rv_loc_elems_preserved rv i (j - 1ul) p h0 h1)
val rs_elems_inv_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a -> rs:S.seq a ->
i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv rg h1 rs i j))
(decreases j)
let rec rs_elems_inv_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val rv_elems_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (V.loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
modifies p h0 h1))
(ensures (rv_elems_inv h1 rv i j))
let rv_elems_inv_preserved #a #rst #rg rv i j p h0 h1 =
rs_elems_inv_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val rv_inv_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_ #a #rst #rg rv p h0 h1 =
rv_elems_inv_preserved #a #rst #rg rv 0ul (V.size_of rv) p h0 h1
// The first core lemma of `rvector`
val rv_inv_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv h1 rv))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let rv_inv_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
rv_inv_preserved_ rv p h0 h1
val rv_inv_preserved_int:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} ->
h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
modifies (loc_all_regions_from false
(Rgl?.region_of rg (V.get h0 rv i))) h0 h1 /\
rg_inv rg h1 (V.get h1 rv i)))
(ensures (rv_inv h1 rv))
let rv_inv_preserved_int #a #rst #rg rv i h0 h1 =
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) 0 (U32.v i)
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1;
rs_loc_elems_elem_disj
rg (V.as_seq h0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i);
rs_elems_inv_preserved
rg (V.as_seq h0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(loc_all_regions_from false
(Rgl?.region_of rg (V.get h1 rv i)))
h0 h1
/// Representation
val as_seq_seq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = j - i})
(decreases j)
let rec as_seq_seq #a #rst rg h rs i j =
if i = j then S.empty
else S.snoc (as_seq_seq rg h rs i (j - 1))
(Rgl?.r_repr rg h (S.index rs (j - 1)))
val as_seq_sub:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t ->
j:uint32_t{
i <= j /\
j <= V.size_of rv /\
rv_elems_inv h rv i j} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v j - U32.v i})
(decreases (U32.v j))
let as_seq_sub #a #rst #rg h rv i j =
as_seq_seq rg h (V.as_seq h rv) (U32.v i) (U32.v j)
val as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
GTot (s:S.seq (Rgl?.repr rg){S.length s = U32.v (V.size_of rv)})
let as_seq #a #rst #rg h rv =
as_seq_sub h rv 0ul (V.size_of rv)
val as_seq_sub_as_seq:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg{rv_inv h rv} ->
Lemma (S.equal (as_seq_sub h rv 0ul (V.size_of rv))
(as_seq h rv))
[SMTPat (as_seq_sub h rv 0ul (V.size_of rv))]
let as_seq_sub_as_seq #a #rst #rg h rv = ()
val as_seq_seq_index:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat{k < j - i} ->
Lemma (requires true)
(ensures (S.index (as_seq_seq rg h rs i j) k ==
Rgl?.r_repr rg h (S.index rs (i + k))))
(decreases j)
[SMTPat (S.index (as_seq_seq rg h rs i j) k)]
let rec as_seq_seq_index #a #rst rg h rs i j k =
if i = j then ()
else if k = j - i - 1 then ()
else as_seq_seq_index rg h rs i (j - 1) k
val as_seq_seq_eq:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs1:S.seq a -> rs2:S.seq a ->
i:nat ->
j:nat{i <= j /\ j <= S.length rs1 /\ rs_elems_inv rg h rs1 i j} ->
k:nat ->
l:nat{k <= l /\ l <= S.length rs2 /\ rs_elems_inv rg h rs2 k l} ->
Lemma (requires (S.equal (S.slice rs1 i j) (S.slice rs2 k l)))
(ensures (S.equal (as_seq_seq rg h rs1 i j)
(as_seq_seq rg h rs2 k l)))
let as_seq_seq_eq #a #rst rg h rs1 rs2 i j k l =
assert (forall (a:nat{a < j - i}).
S.index (as_seq_seq rg h rs1 i j) a ==
Rgl?.r_repr rg h (S.index rs1 (i + a)));
assert (forall (a:nat{a < l - k}).
S.index (as_seq_seq rg h rs2 k l) a ==
Rgl?.r_repr rg h (S.index rs2 (k + a)));
assert (S.length (S.slice rs1 i j) = j - i);
assert (S.length (S.slice rs2 k l) = l - k);
assert (forall (a:nat{a < j - i}).
S.index (S.slice rs1 i j) a ==
S.index (S.slice rs2 k l) a);
assert (forall (a:nat{a < j - i}).
S.index rs1 (i + a) == S.index rs2 (k + a))
val as_seq_seq_slice:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat -> j:nat{i <= j /\ j <= S.length rs /\ rs_elems_inv rg h rs i j} ->
k:nat -> l:nat{k <= l && l <= j - i} ->
Lemma (S.equal (S.slice (as_seq_seq rg h rs i j) k l)
(as_seq_seq rg h (S.slice rs (i + k) (i + l)) 0 (l - k)))
#reset-options "--z3rlimit 10"
let rec as_seq_seq_slice #a #rst rg h rs i j k l =
if k = l then ()
else (as_seq_seq_slice rg h rs i j k (l - 1);
as_seq_seq_index rg h rs i j (l - 1);
as_seq_seq_eq rg h
(S.slice rs (i + k) (i + l - 1))
(S.slice rs (i + k) (i + l))
0 (l - k - 1) 0 (l - k - 1))
val as_seq_seq_upd:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
h:HS.mem -> rs:S.seq a ->
i:nat ->
j:nat{
i <= j /\
j <= S.length rs /\
rs_elems_inv rg h rs i j} ->
k:nat{i <= k && k < j} -> v:a{rg_inv rg h v} ->
Lemma (S.equal (as_seq_seq rg h (S.upd rs k v) i j)
(S.upd (as_seq_seq rg h rs i j) (k - i)
(Rgl?.r_repr rg h v)))
let rec as_seq_seq_upd #a #rst rg h rs i j k v =
if i = j then ()
else if k = j - 1 then ()
else as_seq_seq_upd rg h rs i (j - 1) k v
// Preservation based on disjointness
val as_seq_seq_preserved:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
rs:S.seq a -> i:nat -> j:nat{i <= j && j <= S.length rs} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rs_elems_inv rg h0 rs i j /\
loc_disjoint p (rs_loc_elems rg rs i j) /\
modifies p h0 h1))
(ensures (rs_elems_inv_preserved rg rs i j p h0 h1;
S.equal (as_seq_seq rg h0 rs i j)
(as_seq_seq rg h1 rs i j)))
let rec as_seq_seq_preserved #a #rst rg rs i j p h0 h1 =
if i = j then ()
else (rs_elems_inv_preserved rg rs i (j - 1) p h0 h1;
as_seq_seq_preserved rg rs i (j - 1) p h0 h1;
Rgl?.r_sep rg (S.index rs (j - 1)) p h0 h1)
val as_seq_sub_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 rv /\
rv_elems_inv h0 rv i j /\
loc_disjoint p (rv_loc_elems h0 rv i j) /\
loc_disjoint p (V.loc_vector rv) /\
modifies p h0 h1))
(ensures (rv_elems_inv_preserved rv i j p h0 h1;
S.equal (as_seq_sub h0 rv i j)
(as_seq_sub h1 rv i j)))
let as_seq_sub_preserved #a #rst #rg rv i j p h0 h1 =
as_seq_seq_preserved rg (V.as_seq h0 rv) (U32.v i) (U32.v j) p h0 h1
val as_seq_preserved_:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_vector rv) /\
loc_disjoint p (rv_loc_elems h0 rv 0ul (V.size_of rv)) /\
modifies p h0 h1))
(ensures (rv_inv_preserved_ rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
let as_seq_preserved_ #a #rst #rg rv p h0 h1 =
as_seq_sub_preserved rv 0ul (V.size_of rv) p h0 h1
// The second core lemma of `rvector`
val as_seq_preserved:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (rv_inv h0 rv /\
loc_disjoint p (loc_rvector rv) /\
modifies p h0 h1))
(ensures (rv_inv_preserved rv p h0 h1;
S.equal (as_seq h0 rv) (as_seq h1 rv)))
[SMTPat (rv_inv h0 rv);
SMTPat (loc_disjoint p (loc_rvector rv));
SMTPat (modifies p h0 h1)]
let as_seq_preserved #a #rst #rg rv p h0 h1 =
assert (loc_includes (loc_rvector rv) (V.loc_vector rv));
rv_loc_elems_included h0 rv 0ul (V.size_of rv);
assert (loc_includes (loc_rvector rv) (rv_loc_elems h0 rv 0ul (V.size_of rv)));
as_seq_preserved_ rv p h0 h1
/// Construction
val alloc_empty:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 bv h1 -> h0 == h1 /\ V.size_of bv = 0ul))
let alloc_empty #a #rst rg =
V.alloc_empty a
val alloc_:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
cidx:uint32_t{cidx <= V.size_of rv} ->
HST.ST unit
(requires (fun h0 -> rv_itself_inv h0 rv))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv 0ul cidx) h0 h1 /\
rv_itself_inv h1 rv /\
rv_elems_inv h1 rv 0ul cidx /\
rv_elems_reg h1 rv 0ul cidx /\
S.equal (as_seq_sub h1 rv 0ul cidx)
(S.create (U32.v cidx) (Ghost.reveal (Rgl?.irepr rg))) /\
// the loop invariant for this function
V.forall_ h1 rv 0ul cidx
(fun r -> HS.fresh_region (Rgl?.region_of rg r) h0 h1 /\
Rgl?.r_alloc_p rg r) /\
Set.subset (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1))))
(decreases (U32.v cidx))
#reset-options "--z3rlimit 20"
let rec alloc_ #a #rst #rg rv cidx =
let hh0 = HST.get () in
if cidx = 0ul then ()
else (let nrid = HST.new_region (V.frameOf rv) in
let v = rg_alloc rg nrid in
let hh1 = HST.get () in
V.assign rv (cidx - 1ul) v;
let hh2 = HST.get () in
V.loc_vector_within_included rv (cidx - 1ul) cidx;
Rgl?.r_sep
rg (V.get hh2 rv (cidx - 1ul))
(V.loc_vector_within rv (cidx - 1ul) cidx)
hh1 hh2;
alloc_ rv (cidx - 1ul);
let hh3 = HST.get () in
V.loc_vector_within_included rv 0ul (cidx - 1ul);
Rgl?.r_sep
rg (V.get hh3 rv (cidx - 1ul))
(V.loc_vector_within rv 0ul (cidx - 1ul))
hh2 hh3;
V.forall2_extend hh3 rv 0ul (cidx - 1ul)
(fun r1 r2 -> HS.disjoint (Rgl?.region_of rg r1)
(Rgl?.region_of rg r2));
V.loc_vector_within_union_rev rv 0ul cidx)
val alloc_rid:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc_rid #a #rst rg len rid =
let vec = V.alloc_rid len (rg_dummy rg) rid in
alloc_ #a #rst #rg vec len;
V.loc_vector_within_included vec 0ul len;
vec
val alloc_reserve:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} -> rid:HST.erid ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
V.frameOf rv = rid /\
V.size_of rv = 0ul /\
S.equal (as_seq h1 rv) S.empty /\
Set.equal (Map.domain (HS.get_hmap h0))
(Map.domain (HS.get_hmap h1)) /\
B.fresh_loc (V.loc_vector rv) h0 h1))
let alloc_reserve #a #rst rg len rid =
V.alloc_reserve len (rg_dummy rg) rid
val alloc:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
len:uint32_t{len > 0ul} ->
HST.ST (rvector rg)
(requires (fun h0 -> true))
(ensures (fun h0 rv h1 ->
modifies (V.loc_vector rv) h0 h1 /\
rv_inv h1 rv /\
HS.fresh_region (V.frameOf rv) h0 h1 /\
V.size_of rv = len /\
V.forall_all h1 rv (fun r -> Rgl?.r_alloc_p rg r) /\
S.equal (as_seq h1 rv)
(S.create (U32.v len) (Ghost.reveal (Rgl?.irepr rg)))))
let alloc #a #rst rg len =
let nrid = HST.new_region HS.root in
alloc_rid rg len nrid
val insert:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv) /\
V.forall_all h0 rv
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v))))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_union (V.loc_addr_of_vector rv)
(V.loc_vector irv)) h0 h1 /\
rv_inv h1 irv /\
V.get h1 irv (V.size_of rv) == v /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
#reset-options "--z3rlimit 20"
let insert #a #rst #rg rv v =
let hh0 = HST.get () in
let irv = V.insert rv v in
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv))
hh0 hh1;
Rgl?.r_sep rg v
(loc_region_only false (V.frameOf rv))
hh0 hh1;
// Correctness
assert (S.equal (V.as_seq hh0 rv)
(S.slice (V.as_seq hh1 irv) 0 (U32.v (V.size_of rv))));
as_seq_seq_preserved
rg (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))
(loc_region_only false (V.frameOf rv)) hh0 hh1;
as_seq_seq_slice
rg hh1 (V.as_seq hh1 irv) 0 (U32.v (V.size_of irv))
0 (U32.v (V.size_of rv));
irv
val insert_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg{not (V.is_full rv)} -> v:a ->
HST.ST (rvector rg)
(requires (fun h0 ->
rv_inv h0 rv /\ rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 irv h1 ->
V.size_of irv = V.size_of rv + 1ul /\
V.frameOf rv = V.frameOf irv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 irv /\
S.equal (as_seq h1 irv)
(S.snoc (as_seq h0 rv) (Rgl?.r_repr rg h0 v))))
let insert_copy #a #rst #rg cp rv v =
let hh0 = HST.get () in
rv_elems_inv_live_region hh0 rv 0ul (V.size_of rv);
let nrid = HST.new_region (V.frameOf rv) in
let nv = rg_alloc rg nrid in
let hh1 = HST.get () in
Rgl?.r_sep rg v loc_none hh0 hh1;
rv_inv_preserved rv loc_none hh0 hh1;
as_seq_preserved rv loc_none hh0 hh1;
Cpy?.copy cp (Rgl?.state rg) v nv;
let hh2 = HST.get () in
rv_loc_elems_each_disj hh2 rv 0ul (V.size_of rv) nrid;
rv_inv_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
as_seq_preserved_ rv (loc_all_regions_from false nrid) hh1 hh2;
insert rv nv
val assign:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
// rv_inv h0 rv /\
rv_itself_inv h0 rv /\
rv_elems_inv h0 rv 0ul i /\
rv_elems_inv h0 rv (i + 1ul) (V.size_of rv) /\
elems_reg h0 rv /\
V.forall_ h0 rv 0ul i
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
V.forall_ h0 rv (i + 1ul) (V.size_of rv)
(fun b -> HS.disjoint (Rgl?.region_of rg b)
(Rgl?.region_of rg v)) /\
rg_inv rg h0 v /\
HS.extends (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (V.loc_vector_within rv i (i + 1ul)) h0 h1 /\
rv_inv h1 rv /\
V.get h1 rv i == v /\
S.equal (as_seq h1 rv)
(S.append
(as_seq_sub h0 rv 0ul i)
(S.cons (Rgl?.r_repr rg h0 v)
(as_seq_sub h0 rv (i + 1ul) (V.size_of rv))))))
let assign #a #rst #rg rv i v =
let hh0 = HST.get () in
V.assign rv i v;
let hh1 = HST.get () in
// Safety
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) 0 (U32.v i);
rs_loc_elems_parent_disj
rg (V.as_seq hh0 rv) (V.frameOf rv) (U32.v i + 1) (U32.v (V.size_of rv));
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) 0 (U32.v i)
(V.loc_vector rv)
hh0 hh1;
rs_elems_inv_preserved
rg (V.as_seq hh0 rv) (U32.v i + 1) (U32.v (V.size_of rv))
(V.loc_vector rv)
hh0 hh1;
Rgl?.r_sep rg v (V.loc_vector rv) hh0 hh1;
// Correctness
rs_loc_elems_parent_disj
rg (V.as_seq hh1 rv) (V.frameOf rv) 0 (U32.v (V.size_of rv));
as_seq_seq_preserved
rg (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))
(V.loc_vector rv) hh0 hh1
private val r_sep_forall:
#a:Type0 -> #rst:Type -> rg:regional rst a ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
v:a{rg_inv rg h0 v} ->
Lemma (requires (loc_disjoint (loc_all_regions_from
false (Rgl?.region_of rg v)) p /\
modifies p h0 h1))
(ensures (rg_inv rg h1 v /\
Rgl?.r_repr rg h0 v == Rgl?.r_repr rg h1 v))
private let r_sep_forall #a #rst rg p h0 h1 v =
Rgl?.r_sep rg v p h0 h1
val assign_copy:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> cp:copyable #rst a rg ->
rv:rvector rg ->
i:uint32_t{i < V.size_of rv} -> v:a ->
HST.ST unit
(requires (fun h0 ->
rv_inv h0 rv /\
rg_inv rg h0 v /\
HS.disjoint (Rgl?.region_of rg v) (V.frameOf rv)))
(ensures (fun h0 _ h1 ->
modifies (loc_all_regions_from
false (Rgl?.region_of rg (V.get h1 rv i))) h0 h1 /\
rv_inv h1 rv /\
S.equal (as_seq h1 rv)
(S.upd (as_seq h0 rv) (U32.v i) (Rgl?.r_repr rg h0 v))))
let assign_copy #a #rst #rg cp rv i v =
let hh0 = HST.get () in
Cpy?.copy cp (Rgl?.state rg) v (V.index rv i);
let hh1 = HST.get () in
// Safety
rv_inv_preserved_int #a #rst #rg rv i hh0 hh1;
// Correctness
forall_intro
(move_requires
(rs_loc_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v (V.size_of rv))
(U32.v i)));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)));
forall_intro
(move_requires
(r_sep_forall
rg (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i))
hh0 hh1));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
loc_disjoint (rs_loc_elem rg (V.as_seq hh0 rv) k)
(rs_loc_elem rg (V.as_seq hh0 rv) (U32.v i)) ==>
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
Rgl?.r_repr rg hh1 (S.index (V.as_seq hh1 rv) k) ==
Rgl?.r_repr rg hh0 (S.index (V.as_seq hh0 rv) k));
assert (forall (k:nat{k <> U32.v i && k < U32.v (V.size_of rv)}).
S.index (as_seq_seq rg hh1 (V.as_seq hh1 rv)
0 (U32.v (V.size_of rv))) k ==
S.index (as_seq_seq rg hh0 (V.as_seq hh0 rv)
0 (U32.v (V.size_of rv))) k)
val free_elems:
#a:Type0 -> #rst:Type -> #rg:regional rst a -> rv:rvector rg ->
idx:uint32_t{idx < V.size_of rv} ->
HST.ST unit
(requires (fun h0 ->
V.live h0 rv /\
rv_elems_inv h0 rv 0ul (idx + 1ul) /\
rv_elems_reg h0 rv 0ul (idx + 1ul)))
(ensures (fun h0 _ h1 ->
modifies (rv_loc_elems h0 rv 0ul (idx + 1ul)) h0 h1))
let rec free_elems #a #rst #rg rv idx =
let hh0 = HST.get () in
rg_free rg (V.index rv idx);
let hh1 = HST.get () in
rs_loc_elems_elem_disj
rg (V.as_seq hh0 rv) (V.frameOf rv)
0 (U32.v idx + 1) 0 (U32.v idx) (U32.v idx);
rv_elems_inv_preserved
rv 0ul idx (rs_loc_elem rg (V.as_seq hh0 rv) (U32.v idx)) hh0 hh1;
if idx <> 0ul then
free_elems rv (idx - 1ul)
val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv))))) | false | false | LowStar.RVector.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 40,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val flush:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
rv:rvector rg -> i:uint32_t{i <= V.size_of rv} ->
HST.ST (rvector rg)
(requires (fun h0 -> rv_inv h0 rv))
(ensures (fun h0 frv h1 ->
V.size_of frv = V.size_of rv - i /\
V.frameOf rv = V.frameOf frv /\
modifies (loc_rvector rv) h0 h1 /\
rv_inv h1 frv /\
S.equal (as_seq h1 frv)
(S.slice (as_seq h0 rv) (U32.v i) (U32.v (V.size_of rv))))) | [] | LowStar.RVector.flush | {
"file_name": "ulib/LowStar.RVector.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | rv: LowStar.RVector.rvector rg -> i: LowStar.Vector.uint32_t{i <= LowStar.Vector.size_of rv}
-> FStar.HyperStack.ST.ST (LowStar.RVector.rvector rg) | {
"end_col": 5,
"end_line": 1079,
"start_col": 28,
"start_line": 1032
} |
Prims.Tot | val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ())) | val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
let va_codegen_success_Compute_Y0 () = | false | null | false | (va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ())) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_pbool_and",
"Vale.X64.InsVector.va_codegen_success_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.X64.Decls.va_ttrue",
"Vale.X64.Decls.va_pbool"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool | [] | Vale.AES.X64.GHash.va_codegen_success_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_pbool | {
"end_col": 92,
"end_line": 39,
"start_col": 2,
"start_line": 39
} |
Prims.Tot | val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ())))) | val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
let va_codegen_success_ReduceMul128_LE () = | false | null | false | (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_pbool_and (va_codegen_success_ReduceMulRev128 ())
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_ttrue ()))
)) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_pbool_and",
"Vale.X64.InsVector.va_codegen_success_Pshufb",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.AES.X64.GF128_Mul.va_codegen_success_ReduceMulRev128",
"Vale.X64.Decls.va_ttrue",
"Vale.X64.Decls.va_pbool"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool | [] | Vale.AES.X64.GHash.va_codegen_success_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_pbool | {
"end_col": 42,
"end_line": 113,
"start_col": 2,
"start_line": 111
} |
Prims.Tot | val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ()))))) | val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
let va_code_ReduceMul128_LE () = | false | null | false | (va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CCons (va_code_ReduceMulRev128 ())
(va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CNil ()))))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_Block",
"Vale.X64.Decls.va_CCons",
"Vale.X64.InsVector.va_code_Pshufb",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.AES.X64.GF128_Mul.va_code_ReduceMulRev128",
"Vale.X64.Decls.va_CNil",
"Vale.X64.Decls.va_code"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code | [] | Vale.AES.X64.GHash.va_code_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_code | {
"end_col": 20,
"end_line": 106,
"start_col": 2,
"start_line": 104
} |
Prims.Tot | val va_codegen_success_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_pbool | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_codegen_success_Compute_ghash_incremental_register () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_pbool_and
(va_codegen_success_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_pbool_and
(va_codegen_success_ReduceMul128_LE ()) (va_ttrue ())))) | val va_codegen_success_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_pbool
let va_codegen_success_Compute_ghash_incremental_register () = | false | null | false | (va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2))
(va_pbool_and (va_codegen_success_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11))
(va_pbool_and (va_codegen_success_ReduceMul128_LE ()) (va_ttrue ())))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_pbool_and",
"Vale.X64.InsVector.va_codegen_success_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.X64.InsVector.va_codegen_success_Mov128",
"Vale.AES.X64.GHash.va_codegen_success_ReduceMul128_LE",
"Vale.X64.Decls.va_ttrue",
"Vale.X64.Decls.va_pbool"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b))
//--
//-- Compute_ghash_incremental_register
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_ghash_incremental_register () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_CCons (va_code_Mov128
(va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ())))))
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_codegen_success_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_pbool | [] | Vale.AES.X64.GHash.va_codegen_success_Compute_ghash_incremental_register | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_pbool | {
"end_col": 60,
"end_line": 210,
"start_col": 2,
"start_line": 208
} |
Prims.Tot | val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ()))) | val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
let va_code_Compute_Y0 () = | false | null | false | (va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ()))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_Block",
"Vale.X64.Decls.va_CCons",
"Vale.X64.InsVector.va_code_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.X64.Decls.va_CNil",
"Vale.X64.Decls.va_code"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code | [] | Vale.AES.X64.GHash.va_code_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_code | {
"end_col": 87,
"end_line": 34,
"start_col": 2,
"start_line": 34
} |
Prims.Tot | val va_qcode_ReduceMul128_LE (va_mods: va_mods_t) (a b: poly)
: (va_quickCode unit (va_code_ReduceMul128_LE ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (())))))) | val va_qcode_ReduceMul128_LE (va_mods: va_mods_t) (a b: poly)
: (va_quickCode unit (va_code_ReduceMul128_LE ()))
let va_qcode_ReduceMul128_LE (va_mods: va_mods_t) (a b: poly)
: (va_quickCode unit (va_code_ReduceMul128_LE ())) = | false | null | false | (qblock va_mods
(fun (va_s: va_state) ->
let va_old_s:va_state = va_s in
va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b)
(va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_QEmpty (())))))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.X64.QuickCode.va_mods_t",
"Vale.Math.Poly2_s.poly",
"Vale.X64.QuickCodes.qblock",
"Prims.unit",
"Prims.Cons",
"Vale.X64.Decls.va_code",
"Vale.X64.InsVector.va_code_Pshufb",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.AES.X64.GF128_Mul.va_code_ReduceMulRev128",
"Prims.Nil",
"Vale.X64.Machine_s.precode",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_state",
"Vale.X64.QuickCodes.va_QSeq",
"Vale.X64.QuickCodes.va_range1",
"Vale.X64.InsVector.va_quick_Pshufb",
"Vale.AES.X64.GF128_Mul.va_quick_ReduceMulRev128",
"Vale.X64.QuickCodes.va_QEmpty",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCodes.quickCodes",
"Vale.X64.QuickCode.va_quickCode",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_qcode_ReduceMul128_LE (va_mods: va_mods_t) (a b: poly)
: (va_quickCode unit (va_code_ReduceMul128_LE ())) | [] | Vale.AES.X64.GHash.va_qcode_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_mods: Vale.X64.QuickCode.va_mods_t -> a: Vale.Math.Poly2_s.poly -> b: Vale.Math.Poly2_s.poly
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.AES.X64.GHash.va_code_ReduceMul128_LE ()) | {
"end_col": 78,
"end_line": 124,
"start_col": 2,
"start_line": 118
} |
Prims.Tot | val va_wp_Compute_Y0 (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (()))) | val va_wp_Compute_Y0 (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0
let va_wp_Compute_Y0 (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 = | false | null | false | (va_get_ok va_s0 /\ sse_enabled /\
(forall (va_x_xmm1: quad32) (va_x_efl: Vale.X64.Flags.t).
let va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in
va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==>
va_k va_sM (()))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.X64.Decls.va_state",
"Prims.unit",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.X64.CPU_Features_s.sse_enabled",
"Prims.l_Forall",
"Vale.X64.Decls.quad32",
"Vale.X64.Flags.t",
"Prims.l_imp",
"Prims.eq2",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat32",
"Vale.X64.Decls.va_get_xmm",
"Vale.Def.Words_s.Mkfour",
"Vale.X64.State.vale_state",
"Vale.X64.Decls.va_upd_flags",
"Vale.X64.Decls.va_upd_xmm"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wp_Compute_Y0 (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 | [] | Vale.AES.X64.GHash.va_wp_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_s0: Vale.X64.Decls.va_state -> va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Type0 | {
"end_col": 90,
"end_line": 76,
"start_col": 2,
"start_line": 74
} |
Prims.Tot | val va_code_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_code | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_code_Compute_ghash_incremental_register () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_CCons (va_code_Mov128
(va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ()))))) | val va_code_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_code
let va_code_Compute_ghash_incremental_register () = | false | null | false | (va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2))
(va_CCons (va_code_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11))
(va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ()))))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.Decls.va_Block",
"Vale.X64.Decls.va_CCons",
"Vale.X64.InsVector.va_code_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.X64.InsVector.va_code_Mov128",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE",
"Vale.X64.Decls.va_CNil",
"Vale.X64.Decls.va_code"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b))
//--
//-- Compute_ghash_incremental_register
[@ "opaque_to_smt" va_qattr] | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_code_Compute_ghash_incremental_register : va_dummy:unit -> Tot va_code | [] | Vale.AES.X64.GHash.va_code_Compute_ghash_incremental_register | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_dummy: Prims.unit -> Vale.X64.Decls.va_code | {
"end_col": 97,
"end_line": 204,
"start_col": 2,
"start_line": 203
} |
Prims.Tot | val va_wp_ReduceMul128_LE (a b: poly) (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (()))) | val va_wp_ReduceMul128_LE (a b: poly) (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0
let va_wp_ReduceMul128_LE (a b: poly) (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 = | false | null | false | (va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\
va_get_xmm 1 va_s0 == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\
va_get_xmm 2 va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\
va_get_xmm 8 va_s0 ==
Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\
(forall (va_x_efl: Vale.X64.Flags.t)
(va_x_r12: nat64)
(va_x_xmm1: quad32)
(va_x_xmm2: quad32)
(va_x_xmm3: quad32)
(va_x_xmm4: quad32)
(va_x_xmm5: quad32)
(va_x_xmm6: quad32).
let va_sM =
va_upd_xmm 6
va_x_xmm6
(va_upd_xmm 5
va_x_xmm5
(va_upd_xmm 4
va_x_xmm4
(va_upd_xmm 3
va_x_xmm3
(va_upd_xmm 2
va_x_xmm2
(va_upd_xmm 1
va_x_xmm1
(va_upd_reg64 rR12 va_x_r12 (va_upd_flags va_x_efl va_s0)))))))
in
va_get_ok va_sM /\
va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 (Vale.AES.GF128_s.gf128_mul
a
b)) ==>
va_k va_sM (()))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.Math.Poly2_s.poly",
"Vale.X64.Decls.va_state",
"Prims.unit",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.X64.CPU_Features_s.pclmulqdq_enabled",
"Vale.X64.CPU_Features_s.avx_enabled",
"Vale.X64.CPU_Features_s.sse_enabled",
"Prims.op_LessThanOrEqual",
"Vale.Math.Poly2_s.degree",
"Prims.eq2",
"Vale.Def.Types_s.quad32",
"Vale.X64.Decls.va_get_xmm",
"Vale.Def.Types_s.reverse_bytes_quad32",
"Vale.AES.GF128_s.gf128_to_quad32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.Mkfour",
"Prims.l_Forall",
"Vale.X64.Flags.t",
"Vale.X64.Memory.nat64",
"Vale.X64.Decls.quad32",
"Prims.l_imp",
"Vale.AES.GF128_s.gf128_mul",
"Vale.X64.State.vale_state",
"Vale.X64.Decls.va_upd_xmm",
"Vale.X64.Decls.va_upd_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.Decls.va_upd_flags"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : | false | true | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wp_ReduceMul128_LE (a b: poly) (va_s0: va_state) (va_k: (va_state -> unit -> Type0)) : Type0 | [] | Vale.AES.X64.GHash.va_wp_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Vale.Math.Poly2_s.poly ->
b: Vale.Math.Poly2_s.poly ->
va_s0: Vale.X64.Decls.va_state ->
va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Type0 | {
"end_col": 59,
"end_line": 171,
"start_col": 2,
"start_line": 160
} |
Prims.Tot | val va_quick_Compute_Y0: Prims.unit -> (va_quickCode unit (va_code_Compute_Y0 ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0) | val va_quick_Compute_Y0: Prims.unit -> (va_quickCode unit (va_code_Compute_Y0 ()))
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) = | false | null | false | (va_QProc (va_code_Compute_Y0 ())
([va_Mod_flags; va_Mod_xmm 1])
va_wp_Compute_Y0
va_wpProof_Compute_Y0) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Prims.unit",
"Vale.X64.QuickCode.va_QProc",
"Vale.AES.X64.GHash.va_code_Compute_Y0",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Prims.Nil",
"Vale.AES.X64.GHash.va_wp_Compute_Y0",
"Vale.AES.X64.GHash.va_wpProof_Compute_Y0",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr] | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_Compute_Y0: Prims.unit -> (va_quickCode unit (va_code_Compute_Y0 ())) | [] | Vale.AES.X64.GHash.va_quick_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: Prims.unit
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.AES.X64.GHash.va_code_Compute_Y0 ()) | {
"end_col": 26,
"end_line": 97,
"start_col": 2,
"start_line": 96
} |
Prims.Ghost | val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
let va_wpProof_Compute_Y0 va_s0 va_k = | false | null | false | let va_sM, va_f0 = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))
);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.X64.Decls.va_state",
"Prims.unit",
"Vale.X64.Decls.va_fuel",
"FStar.Pervasives.Native.Mktuple3",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Prims.Nil",
"Prims._assert",
"Vale.X64.Decls.va_state_eq",
"Vale.X64.Decls.va_update_flags",
"Vale.X64.Decls.va_update_xmm",
"Vale.X64.Decls.va_update_ok",
"Vale.X64.Decls.va_lemma_upd_update",
"FStar.Pervasives.Native.tuple3",
"FStar.Pervasives.Native.tuple2",
"Vale.X64.State.vale_state",
"Vale.AES.X64.GHash.va_lemma_Compute_Y0",
"Vale.AES.X64.GHash.va_code_Compute_Y0"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g)))) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [] | Vale.AES.X64.GHash.va_wpProof_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_s0: Vale.X64.Decls.va_state -> va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Prims.Ghost ((Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) * Prims.unit) | {
"end_col": 22,
"end_line": 92,
"start_col": 38,
"start_line": 85
} |
Prims.Tot | val va_quick_ReduceMul128_LE (a b: poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b)) | val va_quick_ReduceMul128_LE (a b: poly) : (va_quickCode unit (va_code_ReduceMul128_LE ()))
let va_quick_ReduceMul128_LE (a b: poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) = | false | null | false | (va_QProc (va_code_ReduceMul128_LE ())
([
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_xmm 1;
va_Mod_reg64 rR12;
va_Mod_flags
])
(va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b)) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.Math.Poly2_s.poly",
"Vale.X64.QuickCode.va_QProc",
"Prims.unit",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.QuickCode.va_Mod_flags",
"Prims.Nil",
"Vale.AES.X64.GHash.va_wp_ReduceMul128_LE",
"Vale.AES.X64.GHash.va_wpProof_ReduceMul128_LE",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr] | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_ReduceMul128_LE (a b: poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) | [] | Vale.AES.X64.GHash.va_quick_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Vale.Math.Poly2_s.poly -> b: Vale.Math.Poly2_s.poly
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.AES.X64.GHash.va_code_ReduceMul128_LE ()) | {
"end_col": 37,
"end_line": 197,
"start_col": 2,
"start_line": 195
} |
Prims.Tot | val va_qcode_Compute_ghash_incremental_register (va_mods: va_mods_t)
: (va_quickCode unit (va_code_Compute_ghash_incremental_register ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_qcode_Compute_ghash_incremental_register (va_mods:va_mods_t) : (va_quickCode unit
(va_code_Compute_ghash_incremental_register ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 124 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 125 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (fun (va_s:va_state) _ -> va_QBind
va_range1
"***** PRECONDITION NOT MET AT line 127 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMul128_LE (Vale.AES.GF128_s.gf128_of_quad32
(Vale.Def.Types_s.reverse_bytes_quad32 (va_get_xmm 1 va_s))) (Vale.AES.GF128_s.gf128_of_quad32
(va_get_xmm 11 va_s))) (fun (va_s:va_state) _ -> va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 128 column 29 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.AES.GHash.ghash_incremental_reveal ()) (va_QEmpty (()))))))) | val va_qcode_Compute_ghash_incremental_register (va_mods: va_mods_t)
: (va_quickCode unit (va_code_Compute_ghash_incremental_register ()))
let va_qcode_Compute_ghash_incremental_register (va_mods: va_mods_t)
: (va_quickCode unit (va_code_Compute_ghash_incremental_register ())) = | false | null | false | (qblock va_mods
(fun (va_s: va_state) ->
let va_old_s:va_state = va_s in
va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 124 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2))
(va_QBind va_range1
"***** PRECONDITION NOT MET AT line 125 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11))
(fun (va_s: va_state) _ ->
va_QBind va_range1
"***** PRECONDITION NOT MET AT line 127 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMul128_LE (Vale.AES.GF128_s.gf128_of_quad32 (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 1 va_s)))
(Vale.AES.GF128_s.gf128_of_quad32 (va_get_xmm 11 va_s)))
(fun (va_s: va_state) _ ->
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 128 column 29 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_: unit) -> Vale.AES.GHash.ghash_incremental_reveal ())
(va_QEmpty (()))))))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.X64.QuickCode.va_mods_t",
"Vale.X64.QuickCodes.qblock",
"Prims.unit",
"Prims.Cons",
"Vale.X64.Decls.va_code",
"Vale.X64.InsVector.va_code_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Vale.X64.InsVector.va_code_Mov128",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE",
"Prims.Nil",
"Vale.X64.Machine_s.precode",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_state",
"Vale.X64.QuickCodes.va_QSeq",
"Vale.X64.QuickCodes.va_range1",
"Vale.X64.InsVector.va_quick_Pxor",
"Vale.X64.QuickCodes.va_QBind",
"Vale.X64.InsVector.va_quick_Mov128",
"Vale.AES.X64.GHash.va_quick_ReduceMul128_LE",
"Vale.AES.GF128_s.gf128_of_quad32",
"Vale.Def.Types_s.reverse_bytes_quad32",
"Vale.X64.Decls.va_get_xmm",
"Vale.X64.QuickCodes.va_qPURE",
"Prims.pure_post",
"Prims.l_and",
"Prims.l_True",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.eq2",
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.seq",
"Vale.AES.GHash.ghash_incremental",
"Vale.AES.GHash.ghash_incremental_def",
"Vale.AES.GHash.ghash_incremental_reveal",
"Vale.X64.QuickCodes.va_QEmpty",
"Vale.X64.QuickCodes.quickCodes",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCode.va_quickCode",
"Vale.AES.X64.GHash.va_code_Compute_ghash_incremental_register"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b))
//--
//-- Compute_ghash_incremental_register
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_ghash_incremental_register () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_CCons (va_code_Mov128
(va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ())))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_ghash_incremental_register () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_pbool_and
(va_codegen_success_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_pbool_and
(va_codegen_success_ReduceMul128_LE ()) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_ghash_incremental_register (va_mods:va_mods_t) : (va_quickCode unit | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_qcode_Compute_ghash_incremental_register (va_mods: va_mods_t)
: (va_quickCode unit (va_code_Compute_ghash_incremental_register ())) | [] | Vale.AES.X64.GHash.va_qcode_Compute_ghash_incremental_register | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_mods: Vale.X64.QuickCode.va_mods_t
-> Vale.X64.QuickCode.va_quickCode Prims.unit
(Vale.AES.X64.GHash.va_code_Compute_ghash_incremental_register ()) | {
"end_col": 86,
"end_line": 226,
"start_col": 2,
"start_line": 215
} |
Prims.Tot | val va_qcode_Compute_Y0 (va_mods: va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (()))))) | val va_qcode_Compute_Y0 (va_mods: va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ()))
let va_qcode_Compute_Y0 (va_mods: va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) = | false | null | false | (qblock va_mods
(fun (va_s: va_state) ->
let va_old_s:va_state = va_s in
va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1))
(fun (va_s: va_state) _ ->
va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_: unit) -> Vale.Arch.Types.lemma_quad32_xor ())
(va_QEmpty (()))))) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [
"total"
] | [
"Vale.X64.QuickCode.va_mods_t",
"Vale.X64.QuickCodes.qblock",
"Prims.unit",
"Prims.Cons",
"Vale.X64.Decls.va_code",
"Vale.X64.InsVector.va_code_Pxor",
"Vale.X64.Decls.va_op_xmm_xmm",
"Prims.Nil",
"Vale.X64.Machine_s.precode",
"Vale.X64.Decls.ins",
"Vale.X64.Decls.ocmp",
"Vale.X64.Decls.va_state",
"Vale.X64.QuickCodes.va_QBind",
"Vale.X64.QuickCodes.va_range1",
"Vale.X64.InsVector.va_quick_Pxor",
"Vale.X64.QuickCodes.va_qPURE",
"Prims.pure_post",
"Prims.l_and",
"Prims.l_True",
"Prims.l_Forall",
"Prims.l_imp",
"Vale.Def.Types_s.quad32",
"Prims.eq2",
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.nat32",
"Vale.Def.Types_s.quad32_xor",
"Vale.Def.Words_s.Mkfour",
"Vale.Arch.Types.lemma_quad32_xor",
"Vale.X64.QuickCodes.va_QEmpty",
"Vale.X64.QuickCodes.quickCodes",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCode.va_quickCode",
"Vale.AES.X64.GHash.va_code_Compute_Y0"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr] | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_qcode_Compute_Y0 (va_mods: va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) | [] | Vale.AES.X64.GHash.va_qcode_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_mods: Vale.X64.QuickCode.va_mods_t
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.AES.X64.GHash.va_code_Compute_Y0 ()) | {
"end_col": 77,
"end_line": 48,
"start_col": 2,
"start_line": 43
} |
Prims.Ghost | val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM) | val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
let va_lemma_Compute_Y0 va_b0 va_s0 = | false | null | false | let va_mods:va_mods_t = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let va_sM, va_fM, va_g =
va_wp_sound_code_norm (va_code_Compute_Y0 ())
va_qc
va_s0
(fun va_s0 va_sM va_g ->
let () = va_g in
label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\
label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0))
in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Vale.X64.QuickCodes.fuel",
"Prims.unit",
"FStar.Pervasives.Native.Mktuple2",
"Vale.X64.Decls.va_fuel",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_ok",
"Prims.Nil",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.list",
"Vale.X64.QuickCode.__proj__QProc__item__mods",
"Vale.AES.X64.GHash.va_code_Compute_Y0",
"FStar.Pervasives.Native.tuple2",
"FStar.Pervasives.Native.tuple3",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCodes.va_wp_sound_code_norm",
"Prims.l_and",
"Vale.X64.QuickCodes.label",
"Vale.X64.QuickCodes.va_range1",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat32",
"Vale.X64.Decls.va_get_xmm",
"Vale.Def.Words_s.Mkfour",
"Vale.X64.QuickCode.quickCode",
"Vale.AES.X64.GHash.va_qcode_Compute_Y0"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))) | [] | Vale.AES.X64.GHash.va_lemma_Compute_Y0 | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_b0: Vale.X64.Decls.va_code -> va_s0: Vale.X64.Decls.va_state
-> Prims.Ghost (Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) | {
"end_col": 16,
"end_line": 70,
"start_col": 37,
"start_line": 59
} |
Prims.Ghost | val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
let va_wpProof_ReduceMul128_LE a b va_s0 va_k = | false | null | false | let va_sM, va_f0 = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM
(va_update_xmm 6
va_sM
(va_update_xmm 5
va_sM
(va_update_xmm 4
va_sM
(va_update_xmm 3
va_sM
(va_update_xmm 2
va_sM
(va_update_xmm 1
va_sM
(va_update_reg64 rR12
va_sM
(va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_xmm 1;
va_Mod_reg64 rR12;
va_Mod_flags
])
va_sM
va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.Math.Poly2_s.poly",
"Vale.X64.Decls.va_state",
"Prims.unit",
"Vale.X64.Decls.va_fuel",
"FStar.Pervasives.Native.Mktuple3",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.QuickCode.va_Mod_flags",
"Prims.Nil",
"Prims._assert",
"Vale.X64.Decls.va_state_eq",
"Vale.X64.Decls.va_update_xmm",
"Vale.X64.Decls.va_update_reg64",
"Vale.X64.Decls.va_update_flags",
"Vale.X64.Decls.va_update_ok",
"Vale.X64.Decls.va_lemma_upd_update",
"FStar.Pervasives.Native.tuple3",
"FStar.Pervasives.Native.tuple2",
"Vale.X64.State.vale_state",
"Vale.AES.X64.GHash.va_lemma_ReduceMul128_LE",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g)))) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [] | Vale.AES.X64.GHash.va_wpProof_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Vale.Math.Poly2_s.poly ->
b: Vale.Math.Poly2_s.poly ->
va_s0: Vale.X64.Decls.va_state ->
va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Prims.Ghost ((Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) * Prims.unit) | {
"end_col": 22,
"end_line": 191,
"start_col": 47,
"start_line": 182
} |
Prims.Ghost | val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM) | val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b = | false | null | false | let va_mods:va_mods_t =
[
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_xmm 1;
va_Mod_reg64 rR12;
va_Mod_flags;
va_Mod_ok
]
in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let va_sM, va_fM, va_g =
va_wp_sound_code_norm (va_code_ReduceMul128_LE ())
va_qc
va_s0
(fun va_s0 va_sM va_g ->
let () = va_g in
label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\
label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 (Vale.AES.GF128_s.gf128_mul
a
b))))
in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_xmm 1;
va_Mod_reg64 rR12;
va_Mod_flags;
va_Mod_ok
])
va_sM
va_s0;
(va_sM, va_fM) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Vale.Math.Poly2_s.poly",
"Vale.X64.QuickCodes.fuel",
"Prims.unit",
"FStar.Pervasives.Native.Mktuple2",
"Vale.X64.Decls.va_fuel",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_ok",
"Prims.Nil",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.list",
"Vale.X64.QuickCode.__proj__QProc__item__mods",
"Vale.AES.X64.GHash.va_code_ReduceMul128_LE",
"FStar.Pervasives.Native.tuple2",
"FStar.Pervasives.Native.tuple3",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCodes.va_wp_sound_code_norm",
"Prims.l_and",
"Vale.X64.QuickCodes.label",
"Vale.X64.QuickCodes.va_range1",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.Def.Types_s.quad32",
"Vale.X64.Decls.va_get_xmm",
"Vale.Def.Types_s.reverse_bytes_quad32",
"Vale.AES.GF128_s.gf128_to_quad32",
"Vale.AES.GF128_s.gf128_mul",
"Vale.X64.QuickCode.quickCode",
"Vale.AES.X64.GHash.va_qcode_ReduceMul128_LE"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))))) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))))) | [] | Vale.AES.X64.GHash.va_lemma_ReduceMul128_LE | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
va_b0: Vale.X64.Decls.va_code ->
va_s0: Vale.X64.Decls.va_state ->
a: Vale.Math.Poly2_s.poly ->
b: Vale.Math.Poly2_s.poly
-> Prims.Ghost (Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) | {
"end_col": 16,
"end_line": 155,
"start_col": 46,
"start_line": 141
} |
Prims.Ghost | val va_wpProof_Compute_ghash_incremental_register : va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_ghash_incremental_register va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_ghash_incremental_register ())
([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_reg64 rR12;
va_Mod_flags; va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wpProof_Compute_ghash_incremental_register va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_ghash_incremental_register
(va_code_Compute_ghash_incremental_register ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_reg64 rR12 va_sM (va_update_flags
va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_reg64 rR12; va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | val va_wpProof_Compute_ghash_incremental_register : va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_ghash_incremental_register va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_ghash_incremental_register ())
([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_reg64 rR12;
va_Mod_flags; va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
let va_wpProof_Compute_ghash_incremental_register va_s0 va_k = | false | null | false | let va_sM, va_f0 =
va_lemma_Compute_ghash_incremental_register (va_code_Compute_ghash_incremental_register ()) va_s0
in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM
(va_update_xmm 6
va_sM
(va_update_xmm 5
va_sM
(va_update_xmm 4
va_sM
(va_update_xmm 3
va_sM
(va_update_xmm 2
va_sM
(va_update_reg64 rR12
va_sM
(va_update_flags va_sM
(va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_reg64 rR12;
va_Mod_flags;
va_Mod_xmm 1
])
va_sM
va_s0;
let va_g = () in
(va_sM, va_f0, va_g) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.X64.Decls.va_state",
"Prims.unit",
"Vale.X64.Decls.va_fuel",
"FStar.Pervasives.Native.Mktuple3",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.QuickCode.va_Mod_flags",
"Prims.Nil",
"Prims._assert",
"Vale.X64.Decls.va_state_eq",
"Vale.X64.Decls.va_update_xmm",
"Vale.X64.Decls.va_update_reg64",
"Vale.X64.Decls.va_update_flags",
"Vale.X64.Decls.va_update_ok",
"Vale.X64.Decls.va_lemma_upd_update",
"FStar.Pervasives.Native.tuple3",
"FStar.Pervasives.Native.tuple2",
"Vale.X64.State.vale_state",
"Vale.AES.X64.GHash.va_lemma_Compute_ghash_incremental_register",
"Vale.AES.X64.GHash.va_code_Compute_ghash_incremental_register"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b))
//--
//-- Compute_ghash_incremental_register
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_ghash_incremental_register () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_CCons (va_code_Mov128
(va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ())))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_ghash_incremental_register () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_pbool_and
(va_codegen_success_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_pbool_and
(va_codegen_success_ReduceMul128_LE ()) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_ghash_incremental_register (va_mods:va_mods_t) : (va_quickCode unit
(va_code_Compute_ghash_incremental_register ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 124 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 125 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (fun (va_s:va_state) _ -> va_QBind
va_range1
"***** PRECONDITION NOT MET AT line 127 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMul128_LE (Vale.AES.GF128_s.gf128_of_quad32
(Vale.Def.Types_s.reverse_bytes_quad32 (va_get_xmm 1 va_s))) (Vale.AES.GF128_s.gf128_of_quad32
(va_get_xmm 11 va_s))) (fun (va_s:va_state) _ -> va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 128 column 29 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.AES.GHash.ghash_incremental_reveal ()) (va_QEmpty (())))))))
[@"opaque_to_smt"]
let va_lemma_Compute_ghash_incremental_register va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_reg64 rR12; va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_ghash_incremental_register va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_ghash_incremental_register ())
va_qc va_s0 (fun va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 109 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 122 column 89 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 11 va_sM)) (va_get_xmm 1 va_s0) (FStar.Seq.Base.create #quad32 1 (va_get_xmm 2
va_s0)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_reg64 rR12; va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wpProof_Compute_ghash_incremental_register : va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_ghash_incremental_register va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_ghash_incremental_register ())
([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_reg64 rR12;
va_Mod_flags; va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g)))) | [] | Vale.AES.X64.GHash.va_wpProof_Compute_ghash_incremental_register | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_s0: Vale.X64.Decls.va_state -> va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Prims.Ghost ((Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) * Prims.unit) | {
"end_col": 22,
"end_line": 258,
"start_col": 62,
"start_line": 248
} |
Prims.Ghost | val va_lemma_Compute_ghash_incremental_register : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_ghash_incremental_register ()) va_s0 /\
va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ va_get_xmm 8 va_s0 ==
Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 11 va_sM)) (va_get_xmm 1 va_s0) (FStar.Seq.Base.create #quad32 1 (va_get_xmm 2
va_s0)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4
va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_reg64 rR12 va_sM
(va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))))))))) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64.GF128_Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Poly1305.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GHash_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_lemma_Compute_ghash_incremental_register va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_reg64 rR12; va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_ghash_incremental_register va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_ghash_incremental_register ())
va_qc va_s0 (fun va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 109 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 122 column 89 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 11 va_sM)) (va_get_xmm 1 va_s0) (FStar.Seq.Base.create #quad32 1 (va_get_xmm 2
va_s0)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_reg64 rR12; va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM) | val va_lemma_Compute_ghash_incremental_register : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_ghash_incremental_register ()) va_s0 /\
va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ va_get_xmm 8 va_s0 ==
Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 11 va_sM)) (va_get_xmm 1 va_s0) (FStar.Seq.Base.create #quad32 1 (va_get_xmm 2
va_s0)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4
va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_reg64 rR12 va_sM
(va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))))))))
let va_lemma_Compute_ghash_incremental_register va_b0 va_s0 = | false | null | false | let va_mods:va_mods_t =
[
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_reg64 rR12;
va_Mod_flags;
va_Mod_xmm 1;
va_Mod_ok
]
in
let va_qc = va_qcode_Compute_ghash_incremental_register va_mods in
let va_sM, va_fM, va_g =
va_wp_sound_code_norm (va_code_Compute_ghash_incremental_register ())
va_qc
va_s0
(fun va_s0 va_sM va_g ->
let () = va_g in
label va_range1
"***** POSTCONDITION NOT MET AT line 109 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\
label va_range1
"***** POSTCONDITION NOT MET AT line 122 column 89 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM ==
Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32 (va_get_xmm 11
va_sM))
(va_get_xmm 1 va_s0)
(FStar.Seq.Base.create #quad32 1 (va_get_xmm 2 va_s0))))
in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([
va_Mod_xmm 6;
va_Mod_xmm 5;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_reg64 rR12;
va_Mod_flags;
va_Mod_xmm 1;
va_Mod_ok
])
va_sM
va_s0;
(va_sM, va_fM) | {
"checked_file": "Vale.AES.X64.GHash.fst.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Poly1305.Math.fsti.checked",
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.X64.GF128_Mul.fsti.checked",
"Vale.AES.GHash_s.fst.checked",
"Vale.AES.GHash.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"Vale.AES.GF128.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.X64.GHash.fst"
} | [] | [
"Vale.X64.Decls.va_code",
"Vale.X64.Decls.va_state",
"Vale.X64.QuickCodes.fuel",
"Prims.unit",
"FStar.Pervasives.Native.Mktuple2",
"Vale.X64.Decls.va_fuel",
"Vale.X64.QuickCode.va_lemma_norm_mods",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR12",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_ok",
"Prims.Nil",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.list",
"Vale.X64.QuickCode.__proj__QProc__item__mods",
"Vale.AES.X64.GHash.va_code_Compute_ghash_incremental_register",
"FStar.Pervasives.Native.tuple2",
"FStar.Pervasives.Native.tuple3",
"Vale.X64.State.vale_state",
"Vale.X64.QuickCodes.va_wp_sound_code_norm",
"Prims.l_and",
"Vale.X64.QuickCodes.label",
"Vale.X64.QuickCodes.va_range1",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.Def.Types_s.quad32",
"Vale.X64.Decls.va_get_xmm",
"Vale.AES.GHash.ghash_incremental",
"Vale.Def.Types_s.reverse_bytes_quad32",
"FStar.Seq.Base.create",
"Vale.X64.Decls.quad32",
"Vale.X64.QuickCode.quickCode",
"Vale.AES.X64.GHash.va_qcode_Compute_ghash_incremental_register"
] | [] | module Vale.AES.X64.GHash
open Vale.Def.Opaque_s
open FStar.Seq
open Vale.Def.Words_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.AES_s
open Vale.AES.GHash_s
open Vale.AES.GHash
open Vale.AES.GF128_s
open Vale.AES.GF128
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open Vale.Math.Poly2_s
open Vale.Poly1305.Math
open Vale.AES.X64.GF128_Mul
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 30"
//-- Compute_Y0
val va_code_Compute_Y0 : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_Y0 () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_CNil ())))
val va_codegen_success_Compute_Y0 : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_Y0 () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (va_ttrue ()))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_Y0 (va_mods:va_mods_t) : (va_quickCode unit (va_code_Compute_Y0 ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QBind va_range1
"***** PRECONDITION NOT MET AT line 83 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 1)) (fun (va_s:va_state) _ -> va_qPURE
va_range1
"***** PRECONDITION NOT MET AT line 84 column 21 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.Arch.Types.lemma_quad32_xor ()) (va_QEmpty (())))))
val va_lemma_Compute_Y0 : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_Y0 ()) va_s0 /\ va_get_ok va_s0 /\
sse_enabled))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 /\ va_state_eq
va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0)))))
[@"opaque_to_smt"]
let va_lemma_Compute_Y0 va_b0 va_s0 =
let (va_mods:va_mods_t) = [va_Mod_flags; va_Mod_xmm 1; va_Mod_ok] in
let va_qc = va_qcode_Compute_Y0 va_mods in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_Compute_Y0 ()) va_qc va_s0 (fun va_s0
va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 77 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 81 column 39 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0)) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_Compute_Y0 (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ sse_enabled /\ (forall (va_x_xmm1:quad32) (va_x_efl:Vale.X64.Flags.t) . let
va_sM = va_upd_flags va_x_efl (va_upd_xmm 1 va_x_xmm1 va_s0) in va_get_ok va_sM /\ va_get_xmm 1
va_sM == Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 0 0 0 0 ==> va_k va_sM (())))
val va_wpProof_Compute_Y0 : va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_Compute_Y0 va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_Compute_Y0 ()) ([va_Mod_flags;
va_Mod_xmm 1]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_Compute_Y0 va_s0 va_k =
let (va_sM, va_f0) = va_lemma_Compute_Y0 (va_code_Compute_Y0 ()) va_s0 in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM
va_s0))));
va_lemma_norm_mods ([va_Mod_flags; va_Mod_xmm 1]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_Compute_Y0 () : (va_quickCode unit (va_code_Compute_Y0 ())) =
(va_QProc (va_code_Compute_Y0 ()) ([va_Mod_flags; va_Mod_xmm 1]) va_wp_Compute_Y0
va_wpProof_Compute_Y0)
//--
//-- ReduceMul128_LE
val va_code_ReduceMul128_LE : va_dummy:unit -> Tot va_code
[@ "opaque_to_smt" va_qattr]
let va_code_ReduceMul128_LE () =
(va_Block (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_CCons
(va_code_ReduceMulRev128 ()) (va_CCons (va_code_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8))
(va_CNil ())))))
val va_codegen_success_ReduceMul128_LE : va_dummy:unit -> Tot va_pbool
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_ReduceMul128_LE () =
(va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_pbool_and
(va_codegen_success_ReduceMulRev128 ()) (va_pbool_and (va_codegen_success_Pshufb (va_op_xmm_xmm
1) (va_op_xmm_xmm 8)) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_ReduceMul128_LE (va_mods:va_mods_t) (a:poly) (b:poly) : (va_quickCode unit
(va_code_ReduceMul128_LE ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 104 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 105 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMulRev128 a b) (va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 106 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pshufb (va_op_xmm_xmm 1) (va_op_xmm_xmm 8)) (va_QEmpty (()))))))
val va_lemma_ReduceMul128_LE : va_b0:va_code -> va_s0:va_state -> a:poly -> b:poly
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_ReduceMul128_LE ()) va_s0 /\ va_get_ok va_s0 /\
(pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree a <= 127 /\
Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5
va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1
va_sM (va_update_reg64 rR12 va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0)))))))))))
[@"opaque_to_smt"]
let va_lemma_ReduceMul128_LE va_b0 va_s0 a b =
let (va_mods:va_mods_t) = [va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok] in
let va_qc = va_qcode_ReduceMul128_LE va_mods a b in
let (va_sM, va_fM, va_g) = va_wp_sound_code_norm (va_code_ReduceMul128_LE ()) va_qc va_s0 (fun
va_s0 va_sM va_g -> let () = va_g in label va_range1
"***** POSTCONDITION NOT MET AT line 87 column 1 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_ok va_sM) /\ label va_range1
"***** POSTCONDITION NOT MET AT line 102 column 71 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_get_xmm 1 va_sM == Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)))) in
assert_norm (va_qc.mods == va_mods);
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags; va_Mod_ok]) va_sM va_s0;
(va_sM, va_fM)
[@ va_qattr]
let va_wp_ReduceMul128_LE (a:poly) (b:poly) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) :
Type0 =
(va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ Vale.Math.Poly2_s.degree
a <= 127 /\ Vale.Math.Poly2_s.degree b <= 127 /\ va_get_xmm 1 va_s0 ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32 a) /\ va_get_xmm 2
va_s0 == Vale.AES.GF128_s.gf128_to_quad32 b /\ va_get_xmm 8 va_s0 == Vale.Def.Words_s.Mkfour
#Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051) /\ (forall
(va_x_efl:Vale.X64.Flags.t) (va_x_r12:nat64) (va_x_xmm1:quad32) (va_x_xmm2:quad32)
(va_x_xmm3:quad32) (va_x_xmm4:quad32) (va_x_xmm5:quad32) (va_x_xmm6:quad32) . let va_sM =
va_upd_xmm 6 va_x_xmm6 (va_upd_xmm 5 va_x_xmm5 (va_upd_xmm 4 va_x_xmm4 (va_upd_xmm 3 va_x_xmm3
(va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_reg64 rR12 va_x_r12 (va_upd_flags
va_x_efl va_s0))))))) in va_get_ok va_sM /\ va_get_xmm 1 va_sM ==
Vale.Def.Types_s.reverse_bytes_quad32 (Vale.AES.GF128_s.gf128_to_quad32
(Vale.AES.GF128_s.gf128_mul a b)) ==> va_k va_sM (())))
val va_wpProof_ReduceMul128_LE : a:poly -> b:poly -> va_s0:va_state -> va_k:(va_state -> unit ->
Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_ReduceMul128_LE a b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6;
va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12;
va_Mod_flags]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@"opaque_to_smt"]
let va_wpProof_ReduceMul128_LE a b va_s0 va_k =
let (va_sM, va_f0) = va_lemma_ReduceMul128_LE (va_code_ReduceMul128_LE ()) va_s0 a b in
va_lemma_upd_update va_sM;
assert (va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4 va_sM
(va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_reg64 rR12
va_sM (va_update_flags va_sM (va_update_ok va_sM va_s0))))))))));
va_lemma_norm_mods ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2;
va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) va_sM va_s0;
let va_g = () in
(va_sM, va_f0, va_g)
[@ "opaque_to_smt" va_qattr]
let va_quick_ReduceMul128_LE (a:poly) (b:poly) : (va_quickCode unit (va_code_ReduceMul128_LE ())) =
(va_QProc (va_code_ReduceMul128_LE ()) ([va_Mod_xmm 6; va_Mod_xmm 5; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_reg64 rR12; va_Mod_flags]) (va_wp_ReduceMul128_LE a b)
(va_wpProof_ReduceMul128_LE a b))
//--
//-- Compute_ghash_incremental_register
[@ "opaque_to_smt" va_qattr]
let va_code_Compute_ghash_incremental_register () =
(va_Block (va_CCons (va_code_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_CCons (va_code_Mov128
(va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_CCons (va_code_ReduceMul128_LE ()) (va_CNil ())))))
[@ "opaque_to_smt" va_qattr]
let va_codegen_success_Compute_ghash_incremental_register () =
(va_pbool_and (va_codegen_success_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_pbool_and
(va_codegen_success_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (va_pbool_and
(va_codegen_success_ReduceMul128_LE ()) (va_ttrue ()))))
[@ "opaque_to_smt" va_qattr]
let va_qcode_Compute_ghash_incremental_register (va_mods:va_mods_t) : (va_quickCode unit
(va_code_Compute_ghash_incremental_register ())) =
(qblock va_mods (fun (va_s:va_state) -> let (va_old_s:va_state) = va_s in va_QSeq va_range1
"***** PRECONDITION NOT MET AT line 124 column 9 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Pxor (va_op_xmm_xmm 1) (va_op_xmm_xmm 2)) (va_QBind va_range1
"***** PRECONDITION NOT MET AT line 125 column 11 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_Mov128 (va_op_xmm_xmm 2) (va_op_xmm_xmm 11)) (fun (va_s:va_state) _ -> va_QBind
va_range1
"***** PRECONDITION NOT MET AT line 127 column 20 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(va_quick_ReduceMul128_LE (Vale.AES.GF128_s.gf128_of_quad32
(Vale.Def.Types_s.reverse_bytes_quad32 (va_get_xmm 1 va_s))) (Vale.AES.GF128_s.gf128_of_quad32
(va_get_xmm 11 va_s))) (fun (va_s:va_state) _ -> va_qPURE va_range1
"***** PRECONDITION NOT MET AT line 128 column 29 of file /home/gebner/everest/hacl-star/vale/code/crypto/aes/x64/Vale.AES.X64.GHash.vaf *****"
(fun (_:unit) -> Vale.AES.GHash.ghash_incremental_reveal ()) (va_QEmpty (()))))))) | false | false | Vale.AES.X64.GHash.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_lemma_Compute_ghash_incremental_register : va_b0:va_code -> va_s0:va_state
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_Compute_ghash_incremental_register ()) va_s0 /\
va_get_ok va_s0 /\ (pclmulqdq_enabled /\ avx_enabled /\ sse_enabled /\ va_get_xmm 8 va_s0 ==
Vale.Def.Words_s.Mkfour #Vale.Def.Types_s.nat32 202182159 134810123 67438087 66051)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 1 va_sM == Vale.AES.GHash.ghash_incremental (Vale.Def.Types_s.reverse_bytes_quad32
(va_get_xmm 11 va_sM)) (va_get_xmm 1 va_s0) (FStar.Seq.Base.create #quad32 1 (va_get_xmm 2
va_s0)) /\ va_state_eq va_sM (va_update_xmm 6 va_sM (va_update_xmm 5 va_sM (va_update_xmm 4
va_sM (va_update_xmm 3 va_sM (va_update_xmm 2 va_sM (va_update_reg64 rR12 va_sM
(va_update_flags va_sM (va_update_xmm 1 va_sM (va_update_ok va_sM va_s0))))))))))) | [] | Vale.AES.X64.GHash.va_lemma_Compute_ghash_incremental_register | {
"file_name": "obj/Vale.AES.X64.GHash.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | va_b0: Vale.X64.Decls.va_code -> va_s0: Vale.X64.Decls.va_state
-> Prims.Ghost (Vale.X64.Decls.va_state * Vale.X64.Decls.va_fuel) | {
"end_col": 16,
"end_line": 244,
"start_col": 61,
"start_line": 229
} |
Prims.Tot | val return_req (p: vprop) : req_t p | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let return_req (p:vprop) : req_t p = fun _ -> True | val return_req (p: vprop) : req_t p
let return_req (p: vprop) : req_t p = | false | null | false | fun _ -> True | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.vprop",
"Steel.Effect.Common.rmem",
"Prims.l_True",
"Steel.Effect.Common.req_t"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val return_req (p: vprop) : req_t p | [] | Steel.Effect.return_req | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | p: Steel.Effect.Common.vprop -> Steel.Effect.Common.req_t p | {
"end_col": 50,
"end_line": 42,
"start_col": 37,
"start_line": 42
} |
Prims.Tot | val bind_pure_steel__ens
(#a #b: Type)
(wp: pure_wp a)
(#pre: pre_t)
(#post: post_t b)
(ens: (a -> ens_t pre b post))
: ens_t pre b post | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bind_pure_steel__ens (#a:Type) (#b:Type)
(wp:pure_wp a)
(#pre:pre_t) (#post:post_t b) (ens:a -> ens_t pre b post)
: ens_t pre b post
= fun m0 r m1 -> (as_requires wp /\ (exists (x:a). as_ensures wp x /\ ((ens x) m0 r m1))) | val bind_pure_steel__ens
(#a #b: Type)
(wp: pure_wp a)
(#pre: pre_t)
(#post: post_t b)
(ens: (a -> ens_t pre b post))
: ens_t pre b post
let bind_pure_steel__ens
(#a #b: Type)
(wp: pure_wp a)
(#pre: pre_t)
(#post: post_t b)
(ens: (a -> ens_t pre b post))
: ens_t pre b post = | false | null | false | fun m0 r m1 -> (as_requires wp /\ (exists (x: a). as_ensures wp x /\ ((ens x) m0 r m1))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.post_t",
"Steel.Effect.Common.ens_t",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Prims.as_requires",
"Prims.l_Exists",
"Prims.as_ensures"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
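(* Sketch of what an inhabitant looks like (illustrative only, not part of the
   original interface): an unframed computation returning a nat from an empty
   context would live in a type of the shape
   [repr nat false emp (fun _ -> emp) (fun _ -> True) (fun _ n _ -> n >= 0)],
   where the last two arguments are the selector-level requires and ensures
   that the combinators below manipulate. *)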
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
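(* Worked instance (a sketch): picking [a = int], [x = 0] and [p = fun _ -> emp],
   [return_ens int 0 (fun _ -> emp)] unfolds to
   [fun h0 r h1 -> r == 0 /\ frame_equalities emp h0 (focus_rmem h1 emp)]:
   the returned value is exactly 0 and the (empty) context is left untouched,
   which is what lets [return_] act as a unit for [bind] below. *)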
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
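(* Reading sketch: in the degenerate case where [frame_f = emp],
   [frame_g = fun _ -> emp] and [pre_g x] coincides with [post_f x], the
   definition above is essentially
   [fun m0 -> req_f m0 /\ (forall x h1. ens_f m0 x h1 ==> (req_g x) h1)]:
   f's requires must hold in the current state, and every state f can reach
   must satisfy g's requires. The [can_be_split_trans] call only adjusts the
   selector domains so that [req_g x] is evaluated on the right footprint. *)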
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y))))
/// Steel effect combinator to compose two Steel computations
/// Separation logic VCs are squashed goals passed as implicits, annotated with the framing_implicit
/// attribute. This indicates that they will be discharged by the tactic in Steel.Effect.Common
/// Requires/ensures logical VCs are defined using weakest preconditions combinators defined above,
/// and discharged by SMT.
val bind (a:Type) (b:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:a -> pre_t) (#[@@@ framing_implicit] post_g:a -> post_t b)
(#[@@@ framing_implicit] req_g:(x:a -> req_t (pre_g x))) (#[@@@ framing_implicit] ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(#[@@@ framing_implicit] frame_f:vprop) (#[@@@ framing_implicit] frame_g:a -> vprop)
(#[@@@ framing_implicit] post:post_t b)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] _ : squash (maybe_emp_dep framed_g frame_g))
(#[@@@ framing_implicit] pr:a -> prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_forall_dep pr
(fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(#[@@@ framing_implicit] p2:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
(f:repr a framed_f pre_f post_f req_f ens_f)
(g:(x:a -> repr b framed_g (pre_g x) (post_g x) (req_g x) (ens_g x)))
: repr b
true
(pre_f `star` frame_f)
post
(bind_req req_f ens_f req_g frame_f frame_g p1)
(bind_ens req_f ens_f ens_g frame_f frame_g post p1 p2)
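(* Usage sketch (hypothetical code, with [r : ref int] and a [read] action
   from the wider Steel library): elaborating [let x = read r in return x]
   goes through this [bind] with [pre_f = vptr r], [post_f = fun _ -> vptr r],
   and frames chosen by the framing tactic so that the continuation's
   precondition lines up with [post_f]; the squashed arguments [p1] and [p2]
   record exactly those frame choices. *)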
/// Logical precondition for subtyping relation for Steel computation.
unfold
let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved.
// To ensure an SMT-friendly encoding of the VC, it needs to be encapsulated in a squash call
= T.rewrite_with_tactic vc_norm (squash (
(forall (h0:hmem pre_g). req_g (mk_rmem pre_g h0) ==> pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0:hmem pre_g) (x:a) (h1:hmem (post_g x)). (
pr ==> (
can_be_split_trans (post_g x) (post_f x `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) (post_f x `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f) x (focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame))
==> ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1))
))
))
/// Subtyping combinator for Steel computations.
/// Computation [f] is given type `repr a framed_g pre_g post_g req_g ens_g`.
/// As for bind, separation logic goals are encoded as squashed implicits which will be discharged
/// by tactic, while logical requires/ensures operating on selectors are discharged by SMT
val subcomp (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:pre_t) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_g:req_t pre_g) (#[@@@ framing_implicit] ens_g:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame:vprop)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame))
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(#[@@@ framing_implicit] p2:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
(f:repr a framed_f pre_f post_f req_f ens_f)
: Pure (repr a framed_g pre_g post_g req_g ens_g)
(requires subcomp_pre req_f ens_f req_g ens_g p1 p2)
(ensures fun _ -> True)
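(* Note (informal): instantiating [frame] with [emp] and [pr] with [True],
   [subcomp_pre] collapses to a plain rule of consequence: [req_g] must imply
   [req_f] on initial selectors, and [ens_f] must imply [ens_g] across initial
   and final selectors. Spec weakening is thus a pure, SMT-checked obligation,
   while any vprop reshuffling stays in the squashed [p1] and [p2]. *)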
/// Logical precondition for the if_then_else combinator
unfold
let if_then_else_req
(#pre_f:pre_t) (#pre_g:pre_t) (#frame_f #frame_g:vprop) (#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then:req_t pre_f) (req_else:req_t pre_g)
(p:Type0)
: req_t (pre_f `star` frame_f)
= fun h -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
(p ==> req_then (focus_rmem h pre_f)) /\
((~ p) ==> req_else (focus_rmem h pre_g)))
/// Logical postcondition for the if_then_else combinator
unfold
let if_then_else_ens (#a:Type)
(#pre_f:pre_t) (#pre_g:pre_t) (#post_f:post_t a) (#post_g:post_t a)
(#frame_f #frame_g:vprop) (#pr:prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(ens_then:ens_t pre_f a post_f) (ens_else:ens_t pre_g a post_g)
(p:Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> post_f x `star` frame_f)
= fun h0 x h1 -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
can_be_split_trans (post_f x `star` frame_f) (post_g x `star` frame_g) (post_g x);
(p ==> ens_then (focus_rmem h0 pre_f) x (focus_rmem h1 (post_f x))) /\
((~ p) ==> ens_else (focus_rmem h0 pre_g) x (focus_rmem h1 (post_g x))))
/// If_then_else combinator for Steel computations.
/// The soundness of this combinator is automatically proven with respect to the subcomp
/// subtyping combinator defined above by the F* layered effects framework
let if_then_else (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] pre_g:pre_t)
(#[@@@ framing_implicit] post_f:post_t a) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_then:req_t pre_f) (#[@@@ framing_implicit] ens_then:ens_t pre_f a post_f)
(#[@@@ framing_implicit] req_else:req_t pre_g) (#[@@@ framing_implicit] ens_else:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame_f : vprop)
(#[@@@ framing_implicit] frame_g : vprop)
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] me1 : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] me2 : squash (maybe_emp framed_g frame_g))
(#[@@@ framing_implicit] s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(#[@@@ framing_implicit] s_post: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(f:repr a framed_f pre_f post_f req_then ens_then)
(g:repr a framed_g pre_g post_g req_else ens_else)
(p:bool)
: Type
= repr a true (pre_f `star` frame_f) (fun x -> post_f x `star` frame_f)
(if_then_else_req s_pre req_then req_else p)
(if_then_else_ens s_pre s_post ens_then ens_else p)
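(* Note (informal): [if_then_else] only computes the type of the branch merge;
   given a concrete boolean [p], the combined requires/ensures above select
   the then-branch spec when [p] holds and the else-branch spec otherwise,
   with both branches first reframed to the common [pre_f `star` frame_f]
   footprint via [s_pre] and [s_post]. *)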
/// Assembling the combinators defined above into an actual effect
/// If the effect appears at the top-level, make sure it is constrained as per STTop
[@@ ite_soundness_by ite_attr;
top_level_effect "Steel.Effect.SteelTop";
primitive_extraction ]
reflectable
effect {
SteelBase
(a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (_:req_t pre) (_:ens_t pre a post)
with { repr = repr;
return = return_;
bind = bind;
subcomp = subcomp;
if_then_else = if_then_else }
}
//
// Trivial preconditions for top-level effect
//
effect SteelTop (a:Type) (framed:bool) (post:post_t a) (ens:ens_t emp a post) =
SteelBase a framed emp post (return_req _) ens
/// The two user-facing effects, corresponding to not yet framed (Steel) and already framed (SteelF)
/// computations. In the ICFP21 paper, this is modeled by the |- and |-_F modalities
effect Steel (a:Type) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) =
SteelBase a false pre post req ens
effect SteelF (a:Type) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) =
SteelBase a true pre post req ens
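(* Example of a user-facing signature these effects support (a sketch assuming
   [ref], [vptr] and [sel] from Steel.Reference elsewhere in the Steel tree):
   val read (#a:Type0) (r:ref a)
     : Steel a (vptr r) (fun _ -> vptr r)
             (requires fun _ -> True)
             (ensures fun h0 x h1 -> sel r h0 == sel r h1 /\ x == sel r h1)
   The requires/ensures clauses are exactly the [req_t] and [ens_t] selector
   predicates threaded through the combinators above. *)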
(* Composing Steel and Pure computations *)
/// Logical precondition of a Pure and a Steel computation composition.
/// The current state (memory) must satisfy the precondition of the Steel computation,
/// and the wp of the PURE computation `as_requires wp` must also be satisfied
unfold
let bind_pure_steel__req (#a:Type) (wp:pure_wp a)
(#pre:pre_t) (req:a -> req_t pre)
: req_t pre
= fun m -> (wp (fun x -> (req x) m))
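(* Reading sketch: for a pure computation such as [2 + 2], whose wp is roughly
   [fun post -> post 4], [bind_pure_steel__req wp req] reduces to
   [fun m -> (req 4) m]: first run the pure wp, then demand the Steel
   continuation's requires at the value the pure computation returns. *)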
/// Logical postcondition of a Pure and a Steel composition.
/// There exists an intermediate value (the output of the Pure computation) such that
/// the postcondition of the pure computation is satisfied.
unfold
let bind_pure_steel__ens (#a:Type) (#b:Type)
(wp:pure_wp a)
(#pre:pre_t) (#post:post_t b) (ens:a -> ens_t pre b post) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bind_pure_steel__ens
(#a #b: Type)
(wp: pure_wp a)
(#pre: pre_t)
(#post: post_t b)
(ens: (a -> ens_t pre b post))
: ens_t pre b post | [] | Steel.Effect.bind_pure_steel__ens | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | wp: Prims.pure_wp a -> ens: (_: a -> Steel.Effect.Common.ens_t pre b post)
-> Steel.Effect.Common.ens_t pre b post | {
"end_col": 89,
"end_line": 305,
"start_col": 2,
"start_line": 305
} |
Prims.Tot | val bind_pure_steel__req (#a: Type) (wp: pure_wp a) (#pre: pre_t) (req: (a -> req_t pre))
: req_t pre | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bind_pure_steel__req (#a:Type) (wp:pure_wp a)
(#pre:pre_t) (req:a -> req_t pre)
: req_t pre
= fun m -> (wp (fun x -> (req x) m)) | val bind_pure_steel__req (#a: Type) (wp: pure_wp a) (#pre: pre_t) (req: (a -> req_t pre))
: req_t pre
let bind_pure_steel__req (#a: Type) (wp: pure_wp a) (#pre: pre_t) (req: (a -> req_t pre))
: req_t pre = | false | null | false | fun m -> (wp (fun x -> (req x) m)) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Prims.pure_wp",
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.req_t",
"Steel.Effect.Common.rmem",
"Prims.l_True"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y))))
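(* Illustrative sketch only -- not part of this interface. Assuming [ref], [vptr],
   [sel] and [read] from Steel.Reference and [return] from Steel.Effect.Atomic
   (none of which are defined here), the two reads below are composed by [bind]:
   when reading [r1] the inferred frame is [vptr r2], and vice versa, so
   bind_req/bind_ens above are what carry the selector facts about the framed
   reference across each call. *)
// let sum (r1 r2:ref int)
//   : Steel int (vptr r1 `star` vptr r2) (fun _ -> vptr r1 `star` vptr r2)
//       (requires fun _ -> True)
//       (ensures fun h0 x h1 ->
//         x == sel r1 h0 + sel r2 h0 /\
//         sel r1 h1 == sel r1 h0 /\ sel r2 h1 == sel r2 h0)
// = let x1 = read r1 in
//   let x2 = read r2 in
//   return (x1 + x2)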
/// Steel effect combinator to compose two Steel computations
/// Separation logic VCs are squashed goals passed as implicits, annotated with the framing_implicit
/// attribute. This indicates that they will be discharged by the tactic in Steel.Effect.Common
/// Requires/ensures logical VCs are defined using weakest preconditions combinators defined above,
/// and discharged by SMT.
val bind (a:Type) (b:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:a -> pre_t) (#[@@@ framing_implicit] post_g:a -> post_t b)
(#[@@@ framing_implicit] req_g:(x:a -> req_t (pre_g x))) (#[@@@ framing_implicit] ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(#[@@@ framing_implicit] frame_f:vprop) (#[@@@ framing_implicit] frame_g:a -> vprop)
(#[@@@ framing_implicit] post:post_t b)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] _ : squash (maybe_emp_dep framed_g frame_g))
(#[@@@ framing_implicit] pr:a -> prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_forall_dep pr
(fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(#[@@@ framing_implicit] p2:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
(f:repr a framed_f pre_f post_f req_f ens_f)
(g:(x:a -> repr b framed_g (pre_g x) (post_g x) (req_g x) (ens_g x)))
: repr b
true
(pre_f `star` frame_f)
post
(bind_req req_f ens_f req_g frame_f frame_g p1)
(bind_ens req_f ens_f ens_g frame_f frame_g post p1 p2)
/// Logical precondition for subtyping relation for Steel computation.
unfold
let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved.
// To ensure an SMT-friendly encoding of the VC, it needs to be encapsulated in a squash call
= T.rewrite_with_tactic vc_norm (squash (
(forall (h0:hmem pre_g). req_g (mk_rmem pre_g h0) ==> pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0:hmem pre_g) (x:a) (h1:hmem (post_g x)). (
pr ==> (
can_be_split_trans (post_g x) (post_f x `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) (post_f x `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f) x (focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame))
==> ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1))
))
))
/// Subtyping combinator for Steel computations.
/// Computation [f] is given type `repr a framed_g pre_g post_g req_g ens_g`.
/// As for bind, separation logic goals are encoded as squashed implicits which will be discharged
/// by tactic, while logical requires/ensures operating on selectors are discharged by SMT
val subcomp (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:pre_t) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_g:req_t pre_g) (#[@@@ framing_implicit] ens_g:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame:vprop)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame))
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(#[@@@ framing_implicit] p2:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
(f:repr a framed_f pre_f post_f req_f ens_f)
: Pure (repr a framed_g pre_g post_g req_g ens_g)
(requires subcomp_pre req_f ens_f req_g ens_g p1 p2)
(ensures fun _ -> True)
/// Logical precondition for the if_then_else combinator
unfold
let if_then_else_req
(#pre_f:pre_t) (#pre_g:pre_t) (#frame_f #frame_g:vprop) (#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then:req_t pre_f) (req_else:req_t pre_g)
(p:Type0)
: req_t (pre_f `star` frame_f)
= fun h -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
(p ==> req_then (focus_rmem h pre_f)) /\
((~ p) ==> req_else (focus_rmem h pre_g)))
/// Logical postcondition for the if_then_else combinator
unfold
let if_then_else_ens (#a:Type)
(#pre_f:pre_t) (#pre_g:pre_t) (#post_f:post_t a) (#post_g:post_t a)
(#frame_f #frame_g:vprop) (#pr:prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(ens_then:ens_t pre_f a post_f) (ens_else:ens_t pre_g a post_g)
(p:Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> post_f x `star` frame_f)
= fun h0 x h1 -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
can_be_split_trans (post_f x `star` frame_f) (post_g x `star` frame_g) (post_g x);
(p ==> ens_then (focus_rmem h0 pre_f) x (focus_rmem h1 (post_f x))) /\
((~ p) ==> ens_else (focus_rmem h0 pre_g) x (focus_rmem h1 (post_g x))))
/// If_then_else combinator for Steel computations.
/// The soundness of this combinator is automatically proven with respect to the subcomp
/// subtyping combinator defined above by the F* layered effects framework
let if_then_else (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] pre_g:pre_t)
(#[@@@ framing_implicit] post_f:post_t a) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_then:req_t pre_f) (#[@@@ framing_implicit] ens_then:ens_t pre_f a post_f)
(#[@@@ framing_implicit] req_else:req_t pre_g) (#[@@@ framing_implicit] ens_else:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame_f : vprop)
(#[@@@ framing_implicit] frame_g : vprop)
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] me1 : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] me2 : squash (maybe_emp framed_g frame_g))
(#[@@@ framing_implicit] s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(#[@@@ framing_implicit] s_post: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(f:repr a framed_f pre_f post_f req_then ens_then)
(g:repr a framed_g pre_g post_g req_else ens_else)
(p:bool)
: Type
= repr a true (pre_f `star` frame_f) (fun x -> post_f x `star` frame_f)
(if_then_else_req s_pre req_then req_else p)
(if_then_else_ens s_pre s_post ens_then ens_else p)
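(* Illustrative sketch only -- not part of this interface. Assuming [ref], [vptr],
   [sel], [read] and [write] from Steel.Reference and [noop] from
   Steel.Effect.Atomic, both branches below are checked against the common frame
   [vptr r], which is exactly the situation if_then_else_req/ens describe. *)
// let reset_if_negative (r:ref int)
//   : Steel unit (vptr r) (fun _ -> vptr r)
//       (requires fun _ -> True)
//       (ensures fun h0 _ h1 ->
//         sel r h1 == (if sel r h0 < 0 then 0 else sel r h0))
// = let v = read r in
//   if v < 0 then write r 0 else noop ()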
/// Assembling the combinators defined above into an actual effect
/// If the effect appears at the top-level, make sure it is constrained as per STTop
[@@ ite_soundness_by ite_attr;
top_level_effect "Steel.Effect.SteelTop";
primitive_extraction ]
reflectable
effect {
SteelBase
(a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (_:req_t pre) (_:ens_t pre a post)
with { repr = repr;
return = return_;
bind = bind;
subcomp = subcomp;
if_then_else = if_then_else }
}
//
// Trivial preconditions for top-level effect
//
effect SteelTop (a:Type) (framed:bool) (post:post_t a) (ens:ens_t emp a post) =
SteelBase a framed emp post (return_req _) ens
/// The two user-facing effects, corresponding to not yet framed (Steel) and already framed (SteelF)
/// computations. In the ICFP21 paper, this is modeled by the |- and |-_F modalities
effect Steel (a:Type) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) =
SteelBase a false pre post req ens
effect SteelF (a:Type) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) =
SteelBase a true pre post req ens
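(* Illustrative sketch only -- not part of this interface: a typical client
   signature in the [Steel] effect, assuming [ref], [vptr] and [sel] from
   Steel.Reference. The requires/ensures clauses are selector predicates of
   types req_t (vptr r) and ens_t (vptr r) unit (fun _ -> vptr r). *)
// val incr (r:ref int)
//   : Steel unit (vptr r) (fun _ -> vptr r)
//       (requires fun _ -> True)
//       (ensures fun h0 _ h1 -> sel r h1 == sel r h0 + 1)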
(* Composing Steel and Pure computations *)
/// Logical precondition of a Pure and a Steel computation composition.
/// The current state (memory) must satisfy the precondition of the Steel computation,
/// and the wp of the PURE computation `as_requires wp` must also be satisfied
unfold
let bind_pure_steel__req (#a:Type) (wp:pure_wp a)
(#pre:pre_t) (req:a -> req_t pre) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bind_pure_steel__req (#a: Type) (wp: pure_wp a) (#pre: pre_t) (req: (a -> req_t pre))
: req_t pre | [] | Steel.Effect.bind_pure_steel__req | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | wp: Prims.pure_wp a -> req: (_: a -> Steel.Effect.Common.req_t pre) -> Steel.Effect.Common.req_t pre | {
"end_col": 36,
"end_line": 295,
"start_col": 2,
"start_line": 295
} |
Prims.Tot | val return_ens (a: Type) (x: a) (p: (a -> vprop)) : ens_t (p x) a p | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x)) | val return_ens (a: Type) (x: a) (p: (a -> vprop)) : ens_t (p x) a p
let return_ens (a: Type) (x: a) (p: (a -> vprop)) : ens_t (p x) a p = | false | null | false | fun (h0: rmem (p x)) (r: a) (h1: rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x)) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.vprop",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Prims.eq2",
"Steel.Effect.Common.frame_equalities",
"Steel.Effect.Common.focus_rmem",
"Steel.Effect.Common.ens_t"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val return_ens (a: Type) (x: a) (p: (a -> vprop)) : ens_t (p x) a p | [] | Steel.Effect.return_ens | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | a: Type -> x: a -> p: (_: a -> Steel.Effect.Common.vprop) -> Steel.Effect.Common.ens_t (p x) a p | {
"end_col": 61,
"end_line": 50,
"start_col": 2,
"start_line": 49
} |
Prims.Tot | val bind_req:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#pr: (a -> prop) ->
req_g: (x: a -> req_t (pre_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x)))
-> req_t (pre_f `star` frame_f) | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)))) | val bind_req:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#pr: (a -> prop) ->
req_g: (x: a -> req_t (pre_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x)))
-> req_t (pre_f `star` frame_f)
let bind_req
(#a: Type)
(#pre_f: pre_t)
(#post_f: post_t a)
(req_f: req_t pre_f)
(ens_f: ens_t pre_f a post_f)
(#pre_g: (a -> pre_t))
(#pr: (a -> prop))
(req_g: (x: a -> req_t (pre_g x)))
(frame_f: vprop)
(frame_g: (a -> vprop))
(_:
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x))))
: req_t (pre_f `star` frame_f) = | false | null | false | fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x: a) (h1: hmem ((post_f x) `star` frame_f)).
(ens_f (focus_rmem m0 pre_f)
x
(focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f
(focus_rmem m0 frame_f)
(focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) frame_f)) ==>
pr x /\
(can_be_split_trans ((post_f x) `star` frame_f) ((pre_g x) `star` (frame_g x)) (pre_g x);
(req_g x) (focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) (pre_g x)))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.post_t",
"Steel.Effect.Common.req_t",
"Steel.Effect.Common.ens_t",
"Prims.prop",
"Steel.Effect.Common.vprop",
"Prims.squash",
"Steel.Effect.Common.can_be_split_forall_dep",
"Steel.Effect.Common.star",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Steel.Effect.Common.focus_rmem",
"Prims.l_Forall",
"Steel.Effect.Common.hmem",
"Prims.l_imp",
"Steel.Effect.Common.mk_rmem",
"Steel.Effect.Common.frame_equalities",
"Prims.unit",
"Steel.Effect.Common.can_be_split_trans"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x))) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bind_req:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#pr: (a -> prop) ->
req_g: (x: a -> req_t (pre_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x)))
-> req_t (pre_f `star` frame_f) | [] | Steel.Effect.bind_req | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} |
req_f: Steel.Effect.Common.req_t pre_f ->
ens_f: Steel.Effect.Common.ens_t pre_f a post_f ->
req_g: (x: a -> Steel.Effect.Common.req_t (pre_g x)) ->
frame_f: Steel.Effect.Common.vprop ->
frame_g: (_: a -> Steel.Effect.Common.vprop) ->
_:
Prims.squash (Steel.Effect.Common.can_be_split_forall_dep pr
(fun x -> Steel.Effect.Common.star (post_f x) frame_f)
(fun x -> Steel.Effect.Common.star (pre_g x) (frame_g x)))
-> Steel.Effect.Common.req_t (Steel.Effect.Common.star pre_f frame_f) | {
"end_col": 79,
"end_line": 78,
"start_col": 2,
"start_line": 71
} |
Prims.Tot | val if_then_else_req
(#pre_f #pre_g: pre_t)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then: req_t pre_f)
(req_else: req_t pre_g)
(p: Type0)
: req_t (pre_f `star` frame_f) | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let if_then_else_req
(#pre_f:pre_t) (#pre_g:pre_t) (#frame_f #frame_g:vprop) (#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then:req_t pre_f) (req_else:req_t pre_g)
(p:Type0)
: req_t (pre_f `star` frame_f)
= fun h -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
(p ==> req_then (focus_rmem h pre_f)) /\
((~ p) ==> req_else (focus_rmem h pre_g))) | val if_then_else_req
(#pre_f #pre_g: pre_t)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then: req_t pre_f)
(req_else: req_t pre_g)
(p: Type0)
: req_t (pre_f `star` frame_f)
let if_then_else_req
(#pre_f #pre_g: pre_t)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then: req_t pre_f)
(req_else: req_t pre_g)
(p: Type0)
: req_t (pre_f `star` frame_f) = | false | null | false | fun h ->
pr /\
(can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
(p ==> req_then (focus_rmem h pre_f)) /\ ((~p) ==> req_else (focus_rmem h pre_g))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.vprop",
"Prims.prop",
"Prims.squash",
"Steel.Effect.Common.can_be_split_dep",
"Steel.Effect.Common.star",
"Steel.Effect.Common.req_t",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Prims.l_imp",
"Steel.Effect.Common.focus_rmem",
"Prims.l_not",
"Prims.unit",
"Steel.Effect.Common.can_be_split_trans"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y))))
/// Steel effect combinator to compose two Steel computations
/// Separation logic VCs are squashed goals passed as implicits, annotated with the framing_implicit
/// attribute. This indicates that they will be discharged by the tactic in Steel.Effect.Common
/// Requires/ensures logical VCs are defined using weakest preconditions combinators defined above,
/// and discharged by SMT.
val bind (a:Type) (b:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:a -> pre_t) (#[@@@ framing_implicit] post_g:a -> post_t b)
(#[@@@ framing_implicit] req_g:(x:a -> req_t (pre_g x))) (#[@@@ framing_implicit] ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(#[@@@ framing_implicit] frame_f:vprop) (#[@@@ framing_implicit] frame_g:a -> vprop)
(#[@@@ framing_implicit] post:post_t b)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] _ : squash (maybe_emp_dep framed_g frame_g))
(#[@@@ framing_implicit] pr:a -> prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_forall_dep pr
(fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(#[@@@ framing_implicit] p2:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
(f:repr a framed_f pre_f post_f req_f ens_f)
(g:(x:a -> repr b framed_g (pre_g x) (post_g x) (req_g x) (ens_g x)))
: repr b
true
(pre_f `star` frame_f)
post
(bind_req req_f ens_f req_g frame_f frame_g p1)
(bind_ens req_f ens_f ens_g frame_f frame_g post p1 p2)
/// Logical precondition for subtyping relation for Steel computation.
unfold
let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved.
// To ensure an SMT-friendly encoding of the VC, it needs to be encapsulated in a squash call
= T.rewrite_with_tactic vc_norm (squash (
(forall (h0:hmem pre_g). req_g (mk_rmem pre_g h0) ==> pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0:hmem pre_g) (x:a) (h1:hmem (post_g x)). (
pr ==> (
can_be_split_trans (post_g x) (post_f x `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) (post_f x `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f) x (focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame))
==> ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1))
))
))
/// Subtyping combinator for Steel computations.
/// Computation [f] is given type `repr a framed_g pre_g post_g req_g ens_g`.
/// As for bind, separation logic goals are encoded as squashed implicits which will be discharged
/// by tactic, while logical requires/ensures operating on selectors are discharged by SMT
val subcomp (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:pre_t) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_g:req_t pre_g) (#[@@@ framing_implicit] ens_g:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame:vprop)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame))
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(#[@@@ framing_implicit] p2:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
(f:repr a framed_f pre_f post_f req_f ens_f)
: Pure (repr a framed_g pre_g post_g req_g ens_g)
(requires subcomp_pre req_f ens_f req_g ens_g p1 p2)
(ensures fun _ -> True)
/// Logical precondition for the if_then_else combinator
unfold
let if_then_else_req
(#pre_f:pre_t) (#pre_g:pre_t) (#frame_f #frame_g:vprop) (#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then:req_t pre_f) (req_else:req_t pre_g)
(p:Type0) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val if_then_else_req
(#pre_f #pre_g: pre_t)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then: req_t pre_f)
(req_else: req_t pre_g)
(p: Type0)
: req_t (pre_f `star` frame_f) | [] | Steel.Effect.if_then_else_req | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} |
s_pre:
Prims.squash (Steel.Effect.Common.can_be_split_dep pr
(Steel.Effect.Common.star pre_f frame_f)
(Steel.Effect.Common.star pre_g frame_g)) ->
req_then: Steel.Effect.Common.req_t pre_f ->
req_else: Steel.Effect.Common.req_t pre_g ->
p: Type0
-> Steel.Effect.Common.req_t (Steel.Effect.Common.star pre_f frame_f) | {
"end_col": 46,
"end_line": 210,
"start_col": 2,
"start_line": 207
} |
Prims.Tot | val subcomp_pre:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: pre_t ->
#post_g: post_t a ->
req_g: req_t pre_g ->
ens_g: ens_t pre_g a post_g ->
#frame: vprop ->
#pr: prop ->
squash (can_be_split_dep pr pre_g (pre_f `star` frame)) ->
squash (equiv_forall post_g (fun x -> (post_f x) `star` frame))
-> pure_pre | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved.
// To ensure an SMT-friendly encoding of the VC, it needs to be encapsulated in a squash call
= T.rewrite_with_tactic vc_norm (squash (
(forall (h0:hmem pre_g). req_g (mk_rmem pre_g h0) ==> pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0:hmem pre_g) (x:a) (h1:hmem (post_g x)). (
pr ==> (
can_be_split_trans (post_g x) (post_f x `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) (post_f x `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f) x (focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame))
==> ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1))
))
)) | val subcomp_pre:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: pre_t ->
#post_g: post_t a ->
req_g: req_t pre_g ->
ens_g: ens_t pre_g a post_g ->
#frame: vprop ->
#pr: prop ->
squash (can_be_split_dep pr pre_g (pre_f `star` frame)) ->
squash (equiv_forall post_g (fun x -> (post_f x) `star` frame))
-> pure_pre
let subcomp_pre
(#a: Type)
(#pre_f: pre_t)
(#post_f: post_t a)
(req_f: req_t pre_f)
(ens_f: ens_t pre_f a post_f)
(#pre_g: pre_t)
(#post_g: post_t a)
(req_g: req_t pre_g)
(ens_g: ens_t pre_g a post_g)
(#frame: vprop)
(#pr: prop)
(_: squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_: squash (equiv_forall post_g (fun x -> (post_f x) `star` frame)))
: pure_pre = | false | null | false | T.rewrite_with_tactic vc_norm
(squash ((forall (h0: hmem pre_g).
req_g (mk_rmem pre_g h0) ==>
pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0: hmem pre_g) (x: a) (h1: hmem (post_g x)).
(pr ==>
(can_be_split_trans (post_g x) ((post_f x) `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) ((post_f x) `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f)
x
(focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame)) ==>
ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1)))))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.post_t",
"Steel.Effect.Common.req_t",
"Steel.Effect.Common.ens_t",
"Steel.Effect.Common.vprop",
"Prims.prop",
"Prims.squash",
"Steel.Effect.Common.can_be_split_dep",
"Steel.Effect.Common.star",
"Steel.Effect.Common.equiv_forall",
"FStar.Tactics.Effect.rewrite_with_tactic",
"Steel.Effect.Common.vc_norm",
"Prims.l_and",
"Prims.l_Forall",
"Steel.Effect.Common.hmem",
"Prims.l_imp",
"Steel.Effect.Common.mk_rmem",
"Steel.Effect.Common.focus_rmem",
"Prims.unit",
"Steel.Effect.Common.can_be_split_trans",
"Steel.Effect.Common.frame_equalities",
"Prims.pure_pre"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y))))
/// Steel effect combinator to compose two Steel computations
/// Separation logic VCs are squashed goals passed as implicits, annotated with the framing_implicit
/// attribute. This indicates that they will be discharged by the tactic in Steel.Effect.Common
/// Requires/ensures logical VCs are defined using weakest preconditions combinators defined above,
/// and discharged by SMT.
val bind (a:Type) (b:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:a -> pre_t) (#[@@@ framing_implicit] post_g:a -> post_t b)
(#[@@@ framing_implicit] req_g:(x:a -> req_t (pre_g x))) (#[@@@ framing_implicit] ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(#[@@@ framing_implicit] frame_f:vprop) (#[@@@ framing_implicit] frame_g:a -> vprop)
(#[@@@ framing_implicit] post:post_t b)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] _ : squash (maybe_emp_dep framed_g frame_g))
(#[@@@ framing_implicit] pr:a -> prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_forall_dep pr
(fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(#[@@@ framing_implicit] p2:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
(f:repr a framed_f pre_f post_f req_f ens_f)
(g:(x:a -> repr b framed_g (pre_g x) (post_g x) (req_g x) (ens_g x)))
: repr b
true
(pre_f `star` frame_f)
post
(bind_req req_f ens_f req_g frame_f frame_g p1)
(bind_ens req_f ens_f ens_g frame_f frame_g post p1 p2)
/// Logical precondition for subtyping relation for Steel computation.
unfold
let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved. | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val subcomp_pre:
#a: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: pre_t ->
#post_g: post_t a ->
req_g: req_t pre_g ->
ens_g: ens_t pre_g a post_g ->
#frame: vprop ->
#pr: prop ->
squash (can_be_split_dep pr pre_g (pre_f `star` frame)) ->
squash (equiv_forall post_g (fun x -> (post_f x) `star` frame))
-> pure_pre | [] | Steel.Effect.subcomp_pre | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} |
req_f: Steel.Effect.Common.req_t pre_f ->
ens_f: Steel.Effect.Common.ens_t pre_f a post_f ->
req_g: Steel.Effect.Common.req_t pre_g ->
ens_g: Steel.Effect.Common.ens_t pre_g a post_g ->
_:
Prims.squash (Steel.Effect.Common.can_be_split_dep pr
pre_g
(Steel.Effect.Common.star pre_f frame)) ->
_:
Prims.squash (Steel.Effect.Common.equiv_forall post_g
(fun x -> Steel.Effect.Common.star (post_f x) frame))
-> Prims.pure_pre | {
"end_col": 2,
"end_line": 176,
"start_col": 2,
"start_line": 157
} |
Prims.Tot | val if_then_else_ens
(#a: Type)
(#pre_f #pre_g: pre_t)
(#post_f #post_g: post_t a)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2:
squash (equiv_forall (fun x -> (post_f x) `star` frame_f)
(fun x -> (post_g x) `star` frame_g)))
(ens_then: ens_t pre_f a post_f)
(ens_else: ens_t pre_g a post_g)
(p: Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> (post_f x) `star` frame_f) | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let if_then_else_ens (#a:Type)
(#pre_f:pre_t) (#pre_g:pre_t) (#post_f:post_t a) (#post_g:post_t a)
(#frame_f #frame_g:vprop) (#pr:prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(ens_then:ens_t pre_f a post_f) (ens_else:ens_t pre_g a post_g)
(p:Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> post_f x `star` frame_f)
= fun h0 x h1 -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
can_be_split_trans (post_f x `star` frame_f) (post_g x `star` frame_g) (post_g x);
(p ==> ens_then (focus_rmem h0 pre_f) x (focus_rmem h1 (post_f x))) /\
((~ p) ==> ens_else (focus_rmem h0 pre_g) x (focus_rmem h1 (post_g x)))) | val if_then_else_ens
(#a: Type)
(#pre_f #pre_g: pre_t)
(#post_f #post_g: post_t a)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2:
squash (equiv_forall (fun x -> (post_f x) `star` frame_f)
(fun x -> (post_g x) `star` frame_g)))
(ens_then: ens_t pre_f a post_f)
(ens_else: ens_t pre_g a post_g)
(p: Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> (post_f x) `star` frame_f)
let if_then_else_ens
(#a: Type)
(#pre_f #pre_g: pre_t)
(#post_f #post_g: post_t a)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2:
squash (equiv_forall (fun x -> (post_f x) `star` frame_f)
(fun x -> (post_g x) `star` frame_g)))
(ens_then: ens_t pre_f a post_f)
(ens_else: ens_t pre_g a post_g)
(p: Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> (post_f x) `star` frame_f) = | false | null | false | fun h0 x h1 ->
pr /\
(can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
can_be_split_trans ((post_f x) `star` frame_f) ((post_g x) `star` frame_g) (post_g x);
(p ==> ens_then (focus_rmem h0 pre_f) x (focus_rmem h1 (post_f x))) /\
((~p) ==> ens_else (focus_rmem h0 pre_g) x (focus_rmem h1 (post_g x)))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.post_t",
"Steel.Effect.Common.vprop",
"Prims.prop",
"Prims.squash",
"Steel.Effect.Common.can_be_split_dep",
"Steel.Effect.Common.star",
"Steel.Effect.Common.equiv_forall",
"Steel.Effect.Common.ens_t",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Prims.l_imp",
"Steel.Effect.Common.focus_rmem",
"Prims.l_not",
"Prims.unit",
"Steel.Effect.Common.can_be_split_trans"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y))))
/// Steel effect combinator to compose two Steel computations
/// Separation logic VCs are squashed goals passed as implicits, annotated with the framing_implicit
/// attribute. This indicates that they will be discharged by the tactic in Steel.Effect.Common
/// Requires/ensures logical VCs are defined using weakest preconditions combinators defined above,
/// and discharged by SMT.
val bind (a:Type) (b:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:a -> pre_t) (#[@@@ framing_implicit] post_g:a -> post_t b)
(#[@@@ framing_implicit] req_g:(x:a -> req_t (pre_g x))) (#[@@@ framing_implicit] ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(#[@@@ framing_implicit] frame_f:vprop) (#[@@@ framing_implicit] frame_g:a -> vprop)
(#[@@@ framing_implicit] post:post_t b)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame_f))
(#[@@@ framing_implicit] _ : squash (maybe_emp_dep framed_g frame_g))
(#[@@@ framing_implicit] pr:a -> prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_forall_dep pr
(fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(#[@@@ framing_implicit] p2:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
(f:repr a framed_f pre_f post_f req_f ens_f)
(g:(x:a -> repr b framed_g (pre_g x) (post_g x) (req_g x) (ens_g x)))
: repr b
true
(pre_f `star` frame_f)
post
(bind_req req_f ens_f req_g frame_f frame_g p1)
(bind_ens req_f ens_f ens_g frame_f frame_g post p1 p2)
/// Logical precondition for subtyping relation for Steel computation.
unfold
let subcomp_pre (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a) (req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:pre_t) (#post_g:post_t a) (req_g:req_t pre_g) (ens_g:ens_t pre_g a post_g)
(#frame:vprop)
(#pr:prop)
(_:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(_:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
: pure_pre
// The call to with_tactic allows us to reduce VCs in a controlled way, once all
// uvars have been resolved.
// To ensure an SMT-friendly encoding of the VC, it needs to be encapsulated in a squash call
= T.rewrite_with_tactic vc_norm (squash (
(forall (h0:hmem pre_g). req_g (mk_rmem pre_g h0) ==> pr /\
(can_be_split_trans pre_g (pre_f `star` frame) pre_f;
req_f (focus_rmem (mk_rmem pre_g h0) pre_f))) /\
(forall (h0:hmem pre_g) (x:a) (h1:hmem (post_g x)). (
pr ==> (
can_be_split_trans (post_g x) (post_f x `star` frame) (post_f x);
can_be_split_trans (pre_g) (pre_f `star` frame) frame;
can_be_split_trans (post_g x) (post_f x `star` frame) frame;
can_be_split_trans pre_g (pre_f `star` frame) pre_f;
(req_g (mk_rmem pre_g h0) /\
ens_f (focus_rmem (mk_rmem pre_g h0) pre_f) x (focus_rmem (mk_rmem (post_g x) h1) (post_f x)) /\
frame_equalities frame
(focus_rmem (mk_rmem pre_g h0) frame)
(focus_rmem (mk_rmem (post_g x) h1) frame))
==> ens_g (mk_rmem pre_g h0) x (mk_rmem (post_g x) h1))
))
))
/// Subtyping combinator for Steel computations.
/// Computation [f] is given type `repr a framed_g pre_g post_g req_g ens_g`.
/// As for bind, separation logic goals are encoded as squashed implicits which will be discharged
/// by tactic, while logical requires/ensures operating on selectors are discharged by SMT
val subcomp (a:Type)
(#framed_f:eqtype_as_type bool)
(#framed_g:eqtype_as_type bool)
(#[@@@ framing_implicit] pre_f:pre_t) (#[@@@ framing_implicit] post_f:post_t a)
(#[@@@ framing_implicit] req_f:req_t pre_f) (#[@@@ framing_implicit] ens_f:ens_t pre_f a post_f)
(#[@@@ framing_implicit] pre_g:pre_t) (#[@@@ framing_implicit] post_g:post_t a)
(#[@@@ framing_implicit] req_g:req_t pre_g) (#[@@@ framing_implicit] ens_g:ens_t pre_g a post_g)
(#[@@@ framing_implicit] frame:vprop)
(#[@@@ framing_implicit] _ : squash (maybe_emp framed_f frame))
(#[@@@ framing_implicit] pr : prop)
(#[@@@ framing_implicit] p1:squash (can_be_split_dep pr pre_g (pre_f `star` frame)))
(#[@@@ framing_implicit] p2:squash (equiv_forall post_g (fun x -> post_f x `star` frame)))
(f:repr a framed_f pre_f post_f req_f ens_f)
: Pure (repr a framed_g pre_g post_g req_g ens_g)
(requires subcomp_pre req_f ens_f req_g ens_g p1 p2)
(ensures fun _ -> True)
/// Logical precondition for the if_then_else combinator
unfold
let if_then_else_req
(#pre_f:pre_t) (#pre_g:pre_t) (#frame_f #frame_g:vprop) (#pr: prop)
(s_pre: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(req_then:req_t pre_f) (req_else:req_t pre_g)
(p:Type0)
: req_t (pre_f `star` frame_f)
= fun h -> pr /\ (
can_be_split_trans (pre_f `star` frame_f) (pre_g `star` frame_g) pre_g;
(p ==> req_then (focus_rmem h pre_f)) /\
((~ p) ==> req_else (focus_rmem h pre_g)))
/// Logical postcondition for the if_then_else combinator
unfold
let if_then_else_ens (#a:Type)
(#pre_f:pre_t) (#pre_g:pre_t) (#post_f:post_t a) (#post_g:post_t a)
(#frame_f #frame_g:vprop) (#pr:prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2: squash (equiv_forall (fun x -> post_f x `star` frame_f) (fun x -> post_g x `star` frame_g)))
(ens_then:ens_t pre_f a post_f) (ens_else:ens_t pre_g a post_g)
(p:Type0) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val if_then_else_ens
(#a: Type)
(#pre_f #pre_g: pre_t)
(#post_f #post_g: post_t a)
(#frame_f #frame_g: vprop)
(#pr: prop)
(s1: squash (can_be_split_dep pr (pre_f `star` frame_f) (pre_g `star` frame_g)))
(s2:
squash (equiv_forall (fun x -> (post_f x) `star` frame_f)
(fun x -> (post_g x) `star` frame_g)))
(ens_then: ens_t pre_f a post_f)
(ens_else: ens_t pre_g a post_g)
(p: Type0)
: ens_t (pre_f `star` frame_f) a (fun x -> (post_f x) `star` frame_f) | [] | Steel.Effect.if_then_else_ens | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} |
s1:
Prims.squash (Steel.Effect.Common.can_be_split_dep pr
(Steel.Effect.Common.star pre_f frame_f)
(Steel.Effect.Common.star pre_g frame_g)) ->
s2:
Prims.squash (Steel.Effect.Common.equiv_forall (fun x ->
Steel.Effect.Common.star (post_f x) frame_f)
(fun x -> Steel.Effect.Common.star (post_g x) frame_g)) ->
ens_then: Steel.Effect.Common.ens_t pre_f a post_f ->
ens_else: Steel.Effect.Common.ens_t pre_g a post_g ->
p: Type0
-> Steel.Effect.Common.ens_t (Steel.Effect.Common.star pre_f frame_f)
a
(fun x -> Steel.Effect.Common.star (post_f x) frame_f) | {
"end_col": 76,
"end_line": 226,
"start_col": 2,
"start_line": 222
} |
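The `if_then_else_ens` record above gives the combined postcondition that the Steel effect's if-then-else combinator assigns to a conditional. A minimal usage sketch is shown below; it assumes the selector interface of `Steel.Reference` (`ref`, `vptr`, `write`) and the `SteelT` abbreviation from `Steel.Effect`, and the module and definition names are illustrative only, not taken from the dataset.

module SteelIfDemo  (* hypothetical module, not part of the dataset *)
open Steel.Effect
open Steel.Reference

(* Both branches share the precondition `vptr r` and the postcondition
   `fun _ -> vptr r`, so frame_f and frame_g are emp and the
   can_be_split_dep / equiv_forall side conditions of if_then_else are trivial. *)
let set_flag (b:bool) (r:ref int) : SteelT unit (vptr r) (fun _ -> vptr r)
  = if b then write r 1 else write r 0
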
Prims.Tot | val bind_ens:
#a: Type ->
#b: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#post_g: (a -> post_t b) ->
#pr: (a -> prop) ->
ens_g: (x: a -> ens_t (pre_g x) b (post_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
post: post_t b ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x))) ->
squash (can_be_split_post (fun x y -> (post_g x y) `star` (frame_g x)) post)
-> ens_t (pre_f `star` frame_f) b post | [
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": false,
"full_module": "Steel.Semantics.Instantiate",
"short_module": null
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": true,
"full_module": "Steel.Semantics.Hoare.MST",
"short_module": "Sem"
},
{
"abbrev": false,
"full_module": "Steel.Effect.Common",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.Tactics",
"short_module": "T"
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.FunctionalExtensionality",
"short_module": "FExt"
},
{
"abbrev": true,
"full_module": "Steel.Memory",
"short_module": "Mem"
},
{
"abbrev": false,
"full_module": "Steel.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post))
: ens_t (pre_f `star` frame_f) b post
= fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x:a) (h1:hmem (post_f x `star` frame_f)).
pr x /\
(
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (frame_g x);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (post_g x y);
can_be_split_trans (post y) (post_g x y `star` frame_g x) (frame_g x);
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (frame_g x)) (focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x)) y (focus_rmem m2 (post_g x y)))) | val bind_ens:
#a: Type ->
#b: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#post_g: (a -> post_t b) ->
#pr: (a -> prop) ->
ens_g: (x: a -> ens_t (pre_g x) b (post_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
post: post_t b ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x))) ->
squash (can_be_split_post (fun x y -> (post_g x y) `star` (frame_g x)) post)
-> ens_t (pre_f `star` frame_f) b post
let bind_ens
(#a: Type)
(#b: Type)
(#pre_f: pre_t)
(#post_f: post_t a)
(req_f: req_t pre_f)
(ens_f: ens_t pre_f a post_f)
(#pre_g: (a -> pre_t))
(#post_g: (a -> post_t b))
(#pr: (a -> prop))
(ens_g: (x: a -> ens_t (pre_g x) b (post_g x)))
(frame_f: vprop)
(frame_g: (a -> vprop))
(post: post_t b)
(_:
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x))))
(_: squash (can_be_split_post (fun x y -> (post_g x y) `star` (frame_g x)) post))
: ens_t (pre_f `star` frame_f) b post = | false | null | false | fun m0 y m2 ->
req_f (focus_rmem m0 pre_f) /\
(exists (x: a) (h1: hmem ((post_f x) `star` frame_f)).
pr x /\
(can_be_split_trans ((post_f x) `star` frame_f) ((pre_g x) `star` (frame_g x)) (pre_g x);
can_be_split_trans ((post_f x) `star` frame_f) ((pre_g x) `star` (frame_g x)) (frame_g x);
can_be_split_trans (post y) ((post_g x y) `star` (frame_g x)) (post_g x y);
can_be_split_trans (post y) ((post_g x y) `star` (frame_g x)) (frame_g x);
frame_equalities frame_f
(focus_rmem m0 frame_f)
(focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) frame_f) /\
frame_equalities (frame_g x)
(focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) (frame_g x))
(focus_rmem m2 (frame_g x)) /\
ens_f (focus_rmem m0 pre_f)
x
(focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) (post_f x)) /\
(ens_g x) (focus_rmem (mk_rmem ((post_f x) `star` frame_f) h1) (pre_g x))
y
(focus_rmem m2 (post_g x y)))) | {
"checked_file": "Steel.Effect.fsti.checked",
"dependencies": [
"Steel.Memory.fsti.checked",
"Steel.Effect.Common.fsti.checked",
"prims.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Ghost.fsti.checked",
"FStar.FunctionalExtensionality.fsti.checked"
],
"interface_file": false,
"source_file": "Steel.Effect.fsti"
} | [
"total"
] | [
"Steel.Effect.Common.pre_t",
"Steel.Effect.Common.post_t",
"Steel.Effect.Common.req_t",
"Steel.Effect.Common.ens_t",
"Prims.prop",
"Steel.Effect.Common.vprop",
"Prims.squash",
"Steel.Effect.Common.can_be_split_forall_dep",
"Steel.Effect.Common.star",
"Steel.Effect.Common.can_be_split_post",
"Steel.Effect.Common.rmem",
"Prims.l_and",
"Steel.Effect.Common.focus_rmem",
"Prims.l_Exists",
"Steel.Effect.Common.hmem",
"Steel.Effect.Common.frame_equalities",
"Steel.Effect.Common.mk_rmem",
"Prims.unit",
"Steel.Effect.Common.can_be_split_trans"
] | [] | (*
Copyright 2020 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module Steel.Effect
open Steel.Memory
module Mem = Steel.Memory
module FExt = FStar.FunctionalExtensionality
open FStar.Ghost
module T = FStar.Tactics
include Steel.Effect.Common
/// This module defines the main Steel effect, with requires and ensures predicates operating on
/// selectors, which will be discharged by SMT
#set-options "--warn_error -330" //turn off the experimental feature warning
#set-options "--ide_id_info_off"
(* Defining the Steel effect with selectors *)
/// The underlying representation of Steel computations.
/// The framed bit indicates whether this computation has already been framed. This corresponds to the |- and |-_F modalities
/// in the ICFP21 paper
val repr (a:Type) (framed:bool) (pre:pre_t) (post:post_t a) (req:req_t pre) (ens:ens_t pre a post) : Type u#2
/// Logical precondition of the return combinator
unfold
let return_req (p:vprop) : req_t p = fun _ -> True
/// Logical postcondition of the return combinator:
/// The returned value [r] corresponds to the value passed to the return [x],
/// and return leaves selectors of all resources in [p] unchanged
unfold
let return_ens (a:Type) (x:a) (p:a -> vprop) : ens_t (p x) a p =
fun (h0:rmem (p x)) (r:a) (h1:rmem (p r)) ->
r == x /\ frame_equalities (p x) h0 (focus_rmem h1 (p x))
/// Monadic return combinator for the Steel effect. It is parametric in the postcondition
/// The vprop precondition is annotated with the return_pre predicate to enable special handling,
/// as explained in Steel.Effect.Common
val return_ (a:Type) (x:a) (#[@@@ framing_implicit] p:a -> vprop)
: repr a true (return_pre (p x)) p (return_req (p x)) (return_ens a x p)
/// Logical precondition for the composition (bind) of two Steel computations:
/// The postcondition of the first computation must imply the precondition of the second computation,
/// and also ensure that any equalities abducted during frame inference inside the predicate [pr] are satisfied
unfold
let bind_req (#a:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t)
(#pr:a -> prop)
(req_g:(x:a -> req_t (pre_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
: req_t (pre_f `star` frame_f)
= fun m0 ->
req_f (focus_rmem m0 pre_f) /\
(forall (x:a) (h1:hmem (post_f x `star` frame_f)).
(ens_f (focus_rmem m0 pre_f) x (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (post_f x)) /\
frame_equalities frame_f (focus_rmem m0 frame_f) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) frame_f))
==> pr x /\
(can_be_split_trans (post_f x `star` frame_f) (pre_g x `star` frame_g x) (pre_g x);
(req_g x) (focus_rmem (mk_rmem (post_f x `star` frame_f) h1) (pre_g x))))
/// Logical postcondition for the composition (bind) of two Steel computations:
/// The precondition of the first computation was satisfied in the initial state, and there
/// exists an intermediate state where the two-state postcondition of the first computation was
/// satisfied, and which yields the validity of the two-state postcondition of the second computation
/// on the final state [m2] with the returned value [y]
/// Note that the ensures for the bind below asserts req_f
/// This is not necessary, but an explicit assert may help the solver
unfold
let bind_ens (#a:Type) (#b:Type)
(#pre_f:pre_t) (#post_f:post_t a)
(req_f:req_t pre_f) (ens_f:ens_t pre_f a post_f)
(#pre_g:a -> pre_t) (#post_g:a -> post_t b)
(#pr:a -> prop)
(ens_g:(x:a -> ens_t (pre_g x) b (post_g x)))
(frame_f:vprop) (frame_g:a -> vprop)
(post:post_t b)
(_:squash (can_be_split_forall_dep pr (fun x -> post_f x `star` frame_f) (fun x -> pre_g x `star` frame_g x)))
(_:squash (can_be_split_post (fun x y -> post_g x y `star` frame_g x) post)) | false | false | Steel.Effect.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bind_ens:
#a: Type ->
#b: Type ->
#pre_f: pre_t ->
#post_f: post_t a ->
req_f: req_t pre_f ->
ens_f: ens_t pre_f a post_f ->
#pre_g: (a -> pre_t) ->
#post_g: (a -> post_t b) ->
#pr: (a -> prop) ->
ens_g: (x: a -> ens_t (pre_g x) b (post_g x)) ->
frame_f: vprop ->
frame_g: (a -> vprop) ->
post: post_t b ->
squash (can_be_split_forall_dep pr
(fun x -> (post_f x) `star` frame_f)
(fun x -> (pre_g x) `star` (frame_g x))) ->
squash (can_be_split_post (fun x y -> (post_g x y) `star` (frame_g x)) post)
-> ens_t (pre_f `star` frame_f) b post | [] | Steel.Effect.bind_ens | {
"file_name": "lib/steel/Steel.Effect.fsti",
"git_rev": "7fbb54e94dd4f48ff7cb867d3bae6889a635541e",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} |
req_f: Steel.Effect.Common.req_t pre_f ->
ens_f: Steel.Effect.Common.ens_t pre_f a post_f ->
ens_g: (x: a -> Steel.Effect.Common.ens_t (pre_g x) b (post_g x)) ->
frame_f: Steel.Effect.Common.vprop ->
frame_g: (_: a -> Steel.Effect.Common.vprop) ->
post: Steel.Effect.Common.post_t b ->
_:
Prims.squash (Steel.Effect.Common.can_be_split_forall_dep pr
(fun x -> Steel.Effect.Common.star (post_f x) frame_f)
(fun x -> Steel.Effect.Common.star (pre_g x) (frame_g x))) ->
_:
Prims.squash (Steel.Effect.Common.can_be_split_post (fun x y ->
Steel.Effect.Common.star (post_g x y) (frame_g x))
post)
-> Steel.Effect.Common.ens_t (Steel.Effect.Common.star pre_f frame_f) b post | {
"end_col": 108,
"end_line": 113,
"start_col": 2,
"start_line": 101
} |
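`bind_ens` above is the two-state postcondition that sequential composition assigns. The sketch below sequences two primitive writes so that this combinator (and the framing tactic) is exercised; as before it assumes `Steel.Reference`'s selector interface, and the names are hypothetical rather than taken from the dataset.

module SteelBindDemo  (* hypothetical module, not part of the dataset *)
open Steel.Effect
open Steel.Reference

(* The semicolon is elaborated to the effect's bind; its ensures clause is
   built by bind_ens from the postconditions of the two writes. *)
let two_writes (r:ref int) : SteelT unit (vptr r) (fun _ -> vptr r)
  = write r 1;
    write r 2
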
Prims.Tot | val raise_t ([@@@ strictly_positive] _ : Type u#a) : Type u#(max a b) | [
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let raise_t a = raise0 a | val raise_t ([@@@ strictly_positive] _ : Type u#a) : Type u#(max a b)
let raise_t a = | false | null | false | raise0 a | {
"checked_file": "FStar.Universe.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Universe.fst"
} | [
"total"
] | [
"FStar.Universe.raise0"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Universe
(** This module implements some basic facilities to raise the universe of a type *
* The type [raise_t a] is supposed to be isomorphic to [a] but in a higher *
* universe. The two functions [raise_val] and [downgrade_val] allow to coerce *
* from [a] to [raise_t a] and back. **)
noeq type raise0 (a : Type u#a) : Type u#(max a b) =
| Ret : a -> raise0 a | false | true | FStar.Universe.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val raise_t ([@@@ strictly_positive] _ : Type u#a) : Type u#(max a b) | [] | FStar.Universe.raise_t | {
"file_name": "ulib/FStar.Universe.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Type -> Type | {
"end_col": 24,
"end_line": 26,
"start_col": 16,
"start_line": 26
} |
Prims.Tot | val raise_val : #a:Type u#a -> x:a -> raise_t u#a u#b a | [
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let raise_val #a x = Ret x | val raise_val : #a:Type u#a -> x:a -> raise_t u#a u#b a
let raise_val #a x = | false | null | false | Ret x | {
"checked_file": "FStar.Universe.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Universe.fst"
} | [
"total"
] | [
"FStar.Universe.Ret",
"FStar.Universe.raise_t"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Universe
(** This module implements some basic facilities to raise the universe of a type *
* The type [raise_t a] is supposed to be isomorphic to [a] but in a higher *
* universe. The two functions [raise_val] and [downgrade_val] allow to coerce *
* from [a] to [raise_t a] and back. **)
noeq type raise0 (a : Type u#a) : Type u#(max a b) =
| Ret : a -> raise0 a | false | false | FStar.Universe.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val raise_val : #a:Type u#a -> x:a -> raise_t u#a u#b a | [] | FStar.Universe.raise_val | {
"file_name": "ulib/FStar.Universe.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: a -> FStar.Universe.raise_t a | {
"end_col": 26,
"end_line": 27,
"start_col": 21,
"start_line": 27
} |
Prims.Tot | val downgrade_val : #a:Type u#a -> x:raise_t u#a u#b a -> a | [
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let downgrade_val #a x = match x with Ret x0 -> x0 | val downgrade_val : #a:Type u#a -> x:raise_t u#a u#b a -> a
let downgrade_val #a x = | false | null | false | match x with | Ret x0 -> x0 | {
"checked_file": "FStar.Universe.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Universe.fst"
} | [
"total"
] | [
"FStar.Universe.raise_t"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Universe
(** This module implements some basic facilities to raise the universe of a type *
* The type [raise_t a] is supposed to be isomorphic to [a] but in a higher *
* universe. The two functions [raise_val] and [downgrade_val] allow to coerce *
* from [a] to [raise_t a] and back. **)
noeq type raise0 (a : Type u#a) : Type u#(max a b) =
| Ret : a -> raise0 a
let raise_t a = raise0 a | false | false | FStar.Universe.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val downgrade_val : #a:Type u#a -> x:raise_t u#a u#b a -> a | [] | FStar.Universe.downgrade_val | {
"file_name": "ulib/FStar.Universe.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: FStar.Universe.raise_t a -> a | {
"end_col": 50,
"end_line": 28,
"start_col": 25,
"start_line": 28
} |
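The three `FStar.Universe` records above (`raise_t`, `raise_val`, `downgrade_val`) form a round trip between a type and its copy in a higher universe. A small usage sketch follows; the module and binder names are illustrative and not part of the dataset.

module UniverseDemo  (* hypothetical module, not part of the dataset *)
open FStar.Universe

(* Raise a nat into the lifted type and bring it back down. *)
let forty_two : raise_t nat = raise_val 42
let back : nat = downgrade_val forty_two  (* reduces to 42 *)
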
Prims.Tot | val bn_from_bytes_le_uint64:bn_from_bytes_le_st U64 | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false | val bn_from_bytes_le_uint64:bn_from_bytes_le_st U64
let bn_from_bytes_le_uint64:bn_from_bytes_le_st U64 = | false | null | false | mk_bn_from_bytes_le #U64 false | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Convert.mk_bn_from_bytes_le",
"Lib.IntTypes.U64"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false | false | true | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_le_uint64:bn_from_bytes_le_st U64 | [] | Hacl.Bignum.Convert.bn_from_bytes_le_uint64 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_le_st Lib.IntTypes.U64 | {
"end_col": 86,
"end_line": 141,
"start_col": 56,
"start_line": 141
} |
Prims.Tot | val bn_from_bytes_le_uint32:bn_from_bytes_le_st U32 | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false | val bn_from_bytes_le_uint32:bn_from_bytes_le_st U32
let bn_from_bytes_le_uint32:bn_from_bytes_le_st U32 = | false | null | false | mk_bn_from_bytes_le #U32 false | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Convert.mk_bn_from_bytes_le",
"Lib.IntTypes.U32"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame () | false | true | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_le_uint32:bn_from_bytes_le_st U32 | [] | Hacl.Bignum.Convert.bn_from_bytes_le_uint32 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_le_st Lib.IntTypes.U32 | {
"end_col": 86,
"end_line": 139,
"start_col": 56,
"start_line": 139
} |
Prims.Tot | val bn_from_bytes_be_uint32:bn_from_bytes_be_st U32 | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false | val bn_from_bytes_be_uint32:bn_from_bytes_be_st U32
let bn_from_bytes_be_uint32:bn_from_bytes_be_st U32 = | false | null | false | mk_bn_from_bytes_be #U32 false | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Convert.mk_bn_from_bytes_be",
"Lib.IntTypes.U32"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame () | false | true | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_be_uint32:bn_from_bytes_be_st U32 | [] | Hacl.Bignum.Convert.bn_from_bytes_be_uint32 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_be_st Lib.IntTypes.U32 | {
"end_col": 86,
"end_line": 91,
"start_col": 56,
"start_line": 91
} |
Prims.Tot | val bn_from_bytes_be_uint64:bn_from_bytes_be_st U64 | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false | val bn_from_bytes_be_uint64:bn_from_bytes_be_st U64
let bn_from_bytes_be_uint64:bn_from_bytes_be_st U64 = | false | null | false | mk_bn_from_bytes_be #U64 false | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Convert.mk_bn_from_bytes_be",
"Lib.IntTypes.U64"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline] | false | true | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_be_uint64:bn_from_bytes_be_st U64 | [] | Hacl.Bignum.Convert.bn_from_bytes_be_uint64 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_be_st Lib.IntTypes.U64 | {
"end_col": 86,
"end_line": 92,
"start_col": 56,
"start_line": 92
} |
Prims.Tot | val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64 | val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t = | false | null | false | match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64 | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Definitions.limb_t",
"Hacl.Bignum.Convert.bn_from_bytes_be_uint32",
"Hacl.Bignum.Convert.bn_from_bytes_be_uint64",
"Hacl.Bignum.Convert.bn_from_bytes_be_st"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t | [] | Hacl.Bignum.Convert.bn_from_bytes_be | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_be_st t | {
"end_col": 34,
"end_line": 100,
"start_col": 2,
"start_line": 98
} |
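bn_from_bytes_be only dispatches on the limb type; the parsing itself is the one sketched earlier, so the same input yields different limb arrays at the two widths. For example, using that hypothetical Python model (not the extracted C code):

    b = bytes.fromhex("0102030405")
    bn_from_bytes_be(32, b) == [0x02030405, 0x01]   # True: limb 0 is least significant
    bn_from_bytes_be(64, b) == [0x0102030405]       # True: one 64-bit limb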
Prims.Tot | val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_le #t =
match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64 | val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t
let bn_from_bytes_le #t = | false | null | false | match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64 | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Definitions.limb_t",
"Hacl.Bignum.Convert.bn_from_bytes_le_uint32",
"Hacl.Bignum.Convert.bn_from_bytes_le_uint64",
"Hacl.Bignum.Convert.bn_from_bytes_le_st"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false
[@CInline]
let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false
inline_for_extraction noextract
val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t | [] | Hacl.Bignum.Convert.bn_from_bytes_le | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Bignum.Convert.bn_from_bytes_le_st t | {
"end_col": 34,
"end_line": 149,
"start_col": 2,
"start_line": 147
} |
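The little-endian reader pads on the right instead (update_sub tmp 0ul len b) and loads each limb little-endian, which in the same conceptual model is just a change of byte order. A hedged Python sketch mirroring the big-endian one (illustrative only, not the verified code):

    def bn_from_bytes_le(limb_bits, b):
        # read b as one little-endian integer, then split into limbs
        n = int.from_bytes(bytes(b), "little")
        numbytes = limb_bits // 8
        bn_len = (len(b) + numbytes - 1) // numbytes
        mask = (1 << limb_bits) - 1
        return [(n >> (i * limb_bits)) & mask for i in range(bn_len)]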
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b)) | let bn_from_bytes_be_st (t: limb_t) = | false | null | false |
len: size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t} ->
b: lbuffer uint8 len ->
res: lbignum t (blocks len (size (numbytes t)))
-> Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures
fun h0 _ h1 ->
modifies (loc res) h0 h1 /\ as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b)) | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Definitions.limb_t",
"Lib.IntTypes.size_t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.numbytes",
"Hacl.Bignum.Definitions.blocks",
"Lib.IntTypes.size",
"Lib.IntTypes.max_size_t",
"Lib.Buffer.lbuffer",
"Lib.IntTypes.uint8",
"Hacl.Bignum.Definitions.lbignum",
"Prims.unit",
"FStar.Monotonic.HyperStack.mem",
"Lib.Buffer.live",
"Lib.Buffer.MUT",
"Hacl.Bignum.Definitions.limb",
"Lib.Buffer.disjoint",
"Lib.Buffer.modifies",
"Lib.Buffer.loc",
"Prims.eq2",
"Lib.Sequence.seq",
"Prims.l_or",
"Prims.nat",
"FStar.Seq.Base.length",
"Hacl.Spec.Bignum.Definitions.limb",
"Hacl.Spec.Bignum.Definitions.blocks",
"Lib.Buffer.as_seq",
"Hacl.Spec.Bignum.Convert.bn_from_bytes_be"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract | false | true | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_from_bytes_be_st : t: Hacl.Bignum.Definitions.limb_t -> Type0 | [] | Hacl.Bignum.Convert.bn_from_bytes_be_st | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | t: Hacl.Bignum.Definitions.limb_t -> Type0 | {
"end_col": 62,
"end_line": 63,
"start_col": 4,
"start_line": 57
} |
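The refinement on len in this signature only asks that the zero-padded copy still fits in a size_t: blocks len (size (numbytes t)) is the ceiling division of len by the limb byte width. Worked numerically for 64-bit limbs (illustrative values, not taken from the source):

    numbytes = 8                                     # numbytes U64
    length   = 25
    bn_len   = (length + numbytes - 1) // numbytes   # blocks 25 8 = 4 limbs
    tmp_len  = numbytes * bn_len                      # 32-byte padded copy, 7 leading zero bytes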
|
Prims.Tot | val bn_to_bytes_be_uint32 (len: _) : bn_to_bytes_be_st U32 len | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_to_bytes_be_uint32 len : bn_to_bytes_be_st U32 len = mk_bn_to_bytes_be #U32 false len | val bn_to_bytes_be_uint32 (len: _) : bn_to_bytes_be_st U32 len
let bn_to_bytes_be_uint32 len : bn_to_bytes_be_st U32 len = | false | null | false | mk_bn_to_bytes_be #U32 false len | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Lib.IntTypes.size_t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.numbytes",
"Hacl.Bignum.Definitions.blocks",
"Lib.IntTypes.size",
"Lib.IntTypes.max_size_t",
"Hacl.Bignum.Convert.mk_bn_to_bytes_be",
"Hacl.Bignum.Convert.bn_to_bytes_be_st"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false
[@CInline]
let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false
inline_for_extraction noextract
val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t
let bn_from_bytes_le #t =
match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64
inline_for_extraction noextract
val bn_to_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbignum t len
-> res:lbuffer uint8 (size (numbytes t) *! len) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be_ (v len) (as_seq h0 b))
let bn_to_bytes_be_ #t len b res =
[@inline_let] let numb = size (numbytes t) in
let h0 = ST.get () in
[@ inline_let]
let a_spec (i:nat{i <= v len}) = unit in
[@ inline_let]
let spec (h:mem) = S.bn_to_bytes_be_f (v len) (as_seq h b) in
fill_blocks h0 numb len res a_spec (fun _ _ -> ()) (fun _ -> LowStar.Buffer.loc_none) spec
(fun j -> uint_to_bytes_be (sub res (j *! numb) numb) b.(len -! j -! 1ul));
norm_spec [delta_only [`%S.bn_to_bytes_be_]] (S.bn_to_bytes_be_ (v len) (as_seq h0 b))
inline_for_extraction noextract
let bn_to_bytes_be_st (t:limb_t) (len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}) =
b:lbignum t (blocks len (size (numbytes t)))
-> res:lbuffer uint8 len ->
Stack unit
(requires fun h ->
live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_to_bytes_be:
#t:limb_t
-> is_known_len:bool
-> len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t} ->
bn_to_bytes_be_st t len
let mk_bn_to_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
if tmpLen =. len then begin
LowStar.Ignore.ignore tmp;
bn_to_bytes_be_ bnLen b res end
else begin
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end;
pop_frame () | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_to_bytes_be_uint32 (len: _) : bn_to_bytes_be_st U32 len | [] | Hacl.Bignum.Convert.bn_to_bytes_be_uint32 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
len:
Lib.IntTypes.size_t
{ 0 < Lib.IntTypes.v len /\
Lib.IntTypes.numbytes Lib.IntTypes.U32 *
Lib.IntTypes.v (Hacl.Bignum.Definitions.blocks len
(Lib.IntTypes.size (Lib.IntTypes.numbytes Lib.IntTypes.U32))) <=
Lib.IntTypes.max_size_t }
-> Hacl.Bignum.Convert.bn_to_bytes_be_st Lib.IntTypes.U32 len | {
"end_col": 92,
"end_line": 216,
"start_col": 60,
"start_line": 216
} |
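mk_bn_to_bytes_be goes the other way: it writes the limbs big-endian starting from the most-significant one into a numb * bnLen byte scratch buffer and keeps only its last len bytes. A matching Python sketch (again a conceptual model of the spec; the out_len parameter and the explicit modulus are illustration, not part of the F* API):

    def bn_to_bytes_be(limb_bits, limbs, out_len):
        n = 0
        for i, limb in enumerate(limbs):
            n |= limb << (i * limb_bits)
        # keep the low out_len bytes of the big-endian image,
        # i.e. drop the leading bytes of the padded scratch buffer
        return (n % (1 << (8 * out_len))).to_bytes(out_len, "big")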
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_to_bytes_be_st (t:limb_t) (len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}) =
b:lbignum t (blocks len (size (numbytes t)))
-> res:lbuffer uint8 len ->
Stack unit
(requires fun h ->
live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be (v len) (as_seq h0 b)) | let bn_to_bytes_be_st
(t: limb_t)
(len: size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t})
= | false | null | false | b: lbignum t (blocks len (size (numbytes t))) -> res: lbuffer uint8 len
-> Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures
fun h0 _ h1 ->
modifies (loc res) h0 h1 /\ as_seq h1 res == S.bn_to_bytes_be (v len) (as_seq h0 b)) | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Hacl.Bignum.Definitions.limb_t",
"Lib.IntTypes.size_t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.numbytes",
"Hacl.Bignum.Definitions.blocks",
"Lib.IntTypes.size",
"Lib.IntTypes.max_size_t",
"Hacl.Bignum.Definitions.lbignum",
"Lib.Buffer.lbuffer",
"Lib.IntTypes.uint8",
"Prims.unit",
"FStar.Monotonic.HyperStack.mem",
"Lib.Buffer.live",
"Lib.Buffer.MUT",
"Hacl.Bignum.Definitions.limb",
"Lib.Buffer.disjoint",
"Lib.Buffer.modifies",
"Lib.Buffer.loc",
"Prims.eq2",
"Lib.Sequence.lseq",
"Lib.Buffer.as_seq",
"Hacl.Spec.Bignum.Convert.bn_to_bytes_be"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false
[@CInline]
let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false
inline_for_extraction noextract
val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t
let bn_from_bytes_le #t =
match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64
inline_for_extraction noextract
val bn_to_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbignum t len
-> res:lbuffer uint8 (size (numbytes t) *! len) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be_ (v len) (as_seq h0 b))
let bn_to_bytes_be_ #t len b res =
[@inline_let] let numb = size (numbytes t) in
let h0 = ST.get () in
[@ inline_let]
let a_spec (i:nat{i <= v len}) = unit in
[@ inline_let]
let spec (h:mem) = S.bn_to_bytes_be_f (v len) (as_seq h b) in
fill_blocks h0 numb len res a_spec (fun _ _ -> ()) (fun _ -> LowStar.Buffer.loc_none) spec
(fun j -> uint_to_bytes_be (sub res (j *! numb) numb) b.(len -! j -! 1ul));
norm_spec [delta_only [`%S.bn_to_bytes_be_]] (S.bn_to_bytes_be_ (v len) (as_seq h0 b))
inline_for_extraction noextract | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_to_bytes_be_st : t: Hacl.Bignum.Definitions.limb_t ->
len:
Lib.IntTypes.size_t
{ 0 < Lib.IntTypes.v len /\
Lib.IntTypes.numbytes t *
Lib.IntTypes.v (Hacl.Bignum.Definitions.blocks len
(Lib.IntTypes.size (Lib.IntTypes.numbytes t))) <=
Lib.IntTypes.max_size_t }
-> Type0 | [] | Hacl.Bignum.Convert.bn_to_bytes_be_st | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
t: Hacl.Bignum.Definitions.limb_t ->
len:
Lib.IntTypes.size_t
{ 0 < Lib.IntTypes.v len /\
Lib.IntTypes.numbytes t *
Lib.IntTypes.v (Hacl.Bignum.Definitions.blocks len
(Lib.IntTypes.size (Lib.IntTypes.numbytes t))) <=
Lib.IntTypes.max_size_t }
-> Type0 | {
"end_col": 60,
"end_line": 183,
"start_col": 4,
"start_line": 177
} |
|
Prims.Tot | val bn_to_bytes_be_uint64 (len: _) : bn_to_bytes_be_st U64 len | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_to_bytes_be_uint64 len : bn_to_bytes_be_st U64 len = mk_bn_to_bytes_be #U64 false len | val bn_to_bytes_be_uint64 (len: _) : bn_to_bytes_be_st U64 len
let bn_to_bytes_be_uint64 len : bn_to_bytes_be_st U64 len = | false | null | false | mk_bn_to_bytes_be #U64 false len | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Lib.IntTypes.size_t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.numbytes",
"Lib.IntTypes.U64",
"Hacl.Bignum.Definitions.blocks",
"Lib.IntTypes.size",
"Lib.IntTypes.max_size_t",
"Hacl.Bignum.Convert.mk_bn_to_bytes_be",
"Hacl.Bignum.Convert.bn_to_bytes_be_st"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false
[@CInline]
let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false
inline_for_extraction noextract
val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t
let bn_from_bytes_le #t =
match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64
inline_for_extraction noextract
val bn_to_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbignum t len
-> res:lbuffer uint8 (size (numbytes t) *! len) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be_ (v len) (as_seq h0 b))
let bn_to_bytes_be_ #t len b res =
[@inline_let] let numb = size (numbytes t) in
let h0 = ST.get () in
[@ inline_let]
let a_spec (i:nat{i <= v len}) = unit in
[@ inline_let]
let spec (h:mem) = S.bn_to_bytes_be_f (v len) (as_seq h b) in
fill_blocks h0 numb len res a_spec (fun _ _ -> ()) (fun _ -> LowStar.Buffer.loc_none) spec
(fun j -> uint_to_bytes_be (sub res (j *! numb) numb) b.(len -! j -! 1ul));
norm_spec [delta_only [`%S.bn_to_bytes_be_]] (S.bn_to_bytes_be_ (v len) (as_seq h0 b))
inline_for_extraction noextract
let bn_to_bytes_be_st (t:limb_t) (len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}) =
b:lbignum t (blocks len (size (numbytes t)))
-> res:lbuffer uint8 len ->
Stack unit
(requires fun h ->
live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_to_bytes_be:
#t:limb_t
-> is_known_len:bool
-> len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t} ->
bn_to_bytes_be_st t len
let mk_bn_to_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
if tmpLen =. len then begin
LowStar.Ignore.ignore tmp;
bn_to_bytes_be_ bnLen b res end
else begin
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end;
pop_frame ()
[@CInline] | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_to_bytes_be_uint64 (len: _) : bn_to_bytes_be_st U64 len | [] | Hacl.Bignum.Convert.bn_to_bytes_be_uint64 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
len:
Lib.IntTypes.size_t
{ 0 < Lib.IntTypes.v len /\
Lib.IntTypes.numbytes Lib.IntTypes.U64 *
Lib.IntTypes.v (Hacl.Bignum.Definitions.blocks len
(Lib.IntTypes.size (Lib.IntTypes.numbytes Lib.IntTypes.U64))) <=
Lib.IntTypes.max_size_t }
-> Hacl.Bignum.Convert.bn_to_bytes_be_st Lib.IntTypes.U64 len | {
"end_col": 92,
"end_line": 217,
"start_col": 60,
"start_line": 217
} |
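With the Python sketches above, serialization and parsing round-trip whenever the output length is large enough to hold the number, e.g.:

    limbs = bn_from_bytes_be(64, bytes.fromhex("0102030405"))
    bn_to_bytes_be(64, limbs, 5).hex() == "0102030405"   # True

If the requested length is too small for the value, the leading bytes are dropped, matching the sub tmp (tmpLen -! len) len copy in mk_bn_to_bytes_be.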
Prims.Tot | val bn_to_bytes_le_uint32 (len: _) : bn_to_bytes_le_st U32 len | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Bignum.Convert",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "LSeq"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "ST"
},
{
"abbrev": false,
"full_module": "Hacl.Bignum.Definitions",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteBuffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Bignum",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bn_to_bytes_le_uint32 len : bn_to_bytes_le_st U32 len = mk_bn_to_bytes_le #U32 false len | val bn_to_bytes_le_uint32 (len: _) : bn_to_bytes_le_st U32 len
let bn_to_bytes_le_uint32 len : bn_to_bytes_le_st U32 len = | false | null | false | mk_bn_to_bytes_le #U32 false len | {
"checked_file": "Hacl.Bignum.Convert.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.Ignore.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Spec.Bignum.Convert.fst.checked",
"Hacl.Bignum.Definitions.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Bignum.Convert.fst"
} | [
"total"
] | [
"Lib.IntTypes.size_t",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.numbytes",
"Hacl.Bignum.Definitions.blocks",
"Lib.IntTypes.size",
"Lib.IntTypes.max_size_t",
"Hacl.Bignum.Convert.mk_bn_to_bytes_le",
"Hacl.Bignum.Convert.bn_to_bytes_le_st"
] | [] | module Hacl.Bignum.Convert
open FStar.HyperStack
open FStar.HyperStack.ST
open FStar.Mul
open Lib.IntTypes
open Lib.Buffer
open Lib.ByteBuffer
open Hacl.Bignum.Definitions
module ST = FStar.HyperStack.ST
module LSeq = Lib.Sequence
module S = Hacl.Spec.Bignum.Convert
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0"
inline_for_extraction noextract
val bn_from_uint:
#t:limb_t
-> len:size_t{0 < v len}
-> x:limb t
-> b:lbignum t len ->
Stack unit
(requires fun h -> live h b)
(ensures fun h0 _ h1 -> modifies (loc b) h0 h1 /\
as_seq h1 b == S.bn_from_uint (v len) x)
let bn_from_uint #t len x b =
memset b (uint #t 0) len;
b.(0ul) <- x
inline_for_extraction noextract
val bn_from_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbuffer uint8 (size (numbytes t) *! len)
-> res:lbignum t len ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be_ (v len) (as_seq h0 b))
let bn_from_bytes_be_ #t len b res =
let h0 = ST.get () in
[@inline_let]
let spec h = S.bn_from_bytes_be_f (v len) (as_seq h b) in
fill h0 len res spec
(fun j -> uint_from_bytes_be (sub b ((len -! j -! 1ul) *! (size (numbytes t))) (size (numbytes t))))
inline_for_extraction noextract
let bn_from_bytes_be_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_be: #t:limb_t -> is_known_len:bool -> bn_from_bytes_be_st t
let mk_bn_from_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
bn_from_bytes_be_ bnLen b res
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp (tmpLen -! len) len b;
bn_from_bytes_be_ bnLen tmp res end;
pop_frame ()
[@CInline]
let bn_from_bytes_be_uint32 : bn_from_bytes_be_st U32 = mk_bn_from_bytes_be #U32 false
let bn_from_bytes_be_uint64 : bn_from_bytes_be_st U64 = mk_bn_from_bytes_be #U64 false
inline_for_extraction noextract
val bn_from_bytes_be: #t:limb_t -> bn_from_bytes_be_st t
let bn_from_bytes_be #t =
match t with
| U32 -> bn_from_bytes_be_uint32
| U64 -> bn_from_bytes_be_uint64
inline_for_extraction noextract
let bn_from_bytes_le_st (t:limb_t) =
len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}
-> b:lbuffer uint8 len
-> res:lbignum t (blocks len (size (numbytes t))) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_from_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_from_bytes_le: #t:limb_t -> is_known_len:bool -> bn_from_bytes_le_st t
let mk_bn_from_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
if tmpLen =. len then
uints_from_bytes_le res b
else begin
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
update_sub tmp 0ul len b;
uints_from_bytes_le res tmp end;
pop_frame ()
[@CInline]
let bn_from_bytes_le_uint32 : bn_from_bytes_le_st U32 = mk_bn_from_bytes_le #U32 false
[@CInline]
let bn_from_bytes_le_uint64 : bn_from_bytes_le_st U64 = mk_bn_from_bytes_le #U64 false
inline_for_extraction noextract
val bn_from_bytes_le: #t:limb_t -> bn_from_bytes_le_st t
let bn_from_bytes_le #t =
match t with
| U32 -> bn_from_bytes_le_uint32
| U64 -> bn_from_bytes_le_uint64
inline_for_extraction noextract
val bn_to_bytes_be_:
#t:limb_t
-> len:size_t{numbytes t * v len <= max_size_t}
-> b:lbignum t len
-> res:lbuffer uint8 (size (numbytes t) *! len) ->
Stack unit
(requires fun h -> live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be_ (v len) (as_seq h0 b))
let bn_to_bytes_be_ #t len b res =
[@inline_let] let numb = size (numbytes t) in
let h0 = ST.get () in
[@ inline_let]
let a_spec (i:nat{i <= v len}) = unit in
[@ inline_let]
let spec (h:mem) = S.bn_to_bytes_be_f (v len) (as_seq h b) in
fill_blocks h0 numb len res a_spec (fun _ _ -> ()) (fun _ -> LowStar.Buffer.loc_none) spec
(fun j -> uint_to_bytes_be (sub res (j *! numb) numb) b.(len -! j -! 1ul));
norm_spec [delta_only [`%S.bn_to_bytes_be_]] (S.bn_to_bytes_be_ (v len) (as_seq h0 b))
inline_for_extraction noextract
let bn_to_bytes_be_st (t:limb_t) (len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}) =
b:lbignum t (blocks len (size (numbytes t)))
-> res:lbuffer uint8 len ->
Stack unit
(requires fun h ->
live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_be (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_to_bytes_be:
#t:limb_t
-> is_known_len:bool
-> len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t} ->
bn_to_bytes_be_st t len
let mk_bn_to_bytes_be #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
if tmpLen =. len then begin
LowStar.Ignore.ignore tmp;
bn_to_bytes_be_ bnLen b res end
else begin
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
bn_to_bytes_be_ bnLen b tmp;
copy res (sub tmp (tmpLen -! len) len) end;
pop_frame ()
[@CInline]
let bn_to_bytes_be_uint32 len : bn_to_bytes_be_st U32 len = mk_bn_to_bytes_be #U32 false len
let bn_to_bytes_be_uint64 len : bn_to_bytes_be_st U64 len = mk_bn_to_bytes_be #U64 false len
inline_for_extraction noextract
val bn_to_bytes_be: #t:_ -> len:_ -> bn_to_bytes_be_st t len
let bn_to_bytes_be #t =
match t with
| U32 -> bn_to_bytes_be_uint32
| U64 -> bn_to_bytes_be_uint64
inline_for_extraction noextract
let bn_to_bytes_le_st (t:limb_t) (len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t}) =
b:lbignum t (blocks len (size (numbytes t)))
-> res:lbuffer uint8 len ->
Stack unit
(requires fun h ->
live h b /\ live h res /\ disjoint res b)
(ensures fun h0 _ h1 -> modifies (loc res) h0 h1 /\
as_seq h1 res == S.bn_to_bytes_le (v len) (as_seq h0 b))
inline_for_extraction noextract
val mk_bn_to_bytes_le:
#t:limb_t
-> is_known_len:bool
-> len:size_t{0 < v len /\ numbytes t * v (blocks len (size (numbytes t))) <= max_size_t} ->
bn_to_bytes_le_st t len
let mk_bn_to_bytes_le #t is_known_len len b res =
push_frame ();
if is_known_len then begin
[@inline_let] let numb = size (numbytes t) in
[@inline_let] let bnLen = blocks len numb in
[@inline_let] let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
if tmpLen =. len then begin
LowStar.Ignore.ignore tmp;
uints_to_bytes_le bnLen res b end
else begin
uints_to_bytes_le bnLen tmp b;
copy res (sub tmp 0ul len) end end
else begin
[@inline_let] let numb = size (numbytes t) in
let bnLen = blocks len numb in
let tmpLen = numb *! bnLen in
let tmp = create tmpLen (u8 0) in
uints_to_bytes_le bnLen tmp b;
copy res (sub tmp 0ul len) end;
pop_frame () | false | false | Hacl.Bignum.Convert.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bn_to_bytes_le_uint32 (len: _) : bn_to_bytes_le_st U32 len | [] | Hacl.Bignum.Convert.bn_to_bytes_le_uint32 | {
"file_name": "code/bignum/Hacl.Bignum.Convert.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
len:
Lib.IntTypes.size_t
{ 0 < Lib.IntTypes.v len /\
Lib.IntTypes.numbytes Lib.IntTypes.U32 *
Lib.IntTypes.v (Hacl.Bignum.Definitions.blocks len
(Lib.IntTypes.size (Lib.IntTypes.numbytes Lib.IntTypes.U32))) <=
Lib.IntTypes.max_size_t }
-> Hacl.Bignum.Convert.bn_to_bytes_le_st Lib.IntTypes.U32 len | {
"end_col": 92,
"end_line": 270,
"start_col": 60,
"start_line": 270
} |